def empty_response(self):
    """Return a never-cached JSON response whose body is an empty array."""
    payload = json.dumps((), cls=DjangoJSONEncoder)
    response = HttpResponse(payload, content_type='application/javascript')
    add_never_cache_headers(response)
    return response
def get(self, request, slug):
    """Serve the chapter-editing page, or chapter data as JSON.

    With ``?all`` in the query string, return a JSON object listing every
    active chapter of the event (never cached); otherwise render the
    editing template with the event's tagged video.
    """
    event = self.get_event(slug, request)
    if not self.can_view_event(event, request):
        return self.cant_view_event(event, request)
    if not self.can_edit_event(event, request):
        return self.cant_edit_event(event, request)

    if request.GET.get('all'):
        active = Chapter.objects.filter(event=event, is_active=True)
        chapters = [
            {
                'timestamp': chapter.timestamp,
                'text': chapter.text,
                'user': {
                    'email': chapter.user.email,
                    'first_name': chapter.user.first_name,
                    'last_name': chapter.user.last_name,
                },
                'js_date_tag': js_date(chapter.modified),
            }
            for chapter in active.select_related('user')
        ]
        response = http.JsonResponse({'chapters': chapters})
        add_never_cache_headers(response)
        return response

    video = get_video_tagged(event, request)
    return render(request, self.template_name, {'event': event, 'video': video})
def get(self, request, *args, **kwargs):
    """Serve the view's choices as a never-cached JSON document."""
    body = json.dumps(self.get_choices(), cls=DjangoJSONEncoder)
    response = HttpResponse(body, content_type='application/javascript')
    add_never_cache_headers(response)
    return response
def update_response_headers(request, response):
    """Finalize headers on an API response.

    Sets Content-Type from ``request.serialization`` (unless the response
    opted out via ``override_serialization``), adds a Date header in
    DEBUG/TEST mode, fills in Content-Length when it can be computed, and
    forces no-cache semantics.

    Raises:
        ValueError: for an unknown serialization format.
    """
    if not getattr(response, "override_serialization", False):
        serialization = request.serialization
        if serialization == "xml":
            response["Content-Type"] = "application/xml; charset=UTF-8"
        elif serialization == "json":
            response["Content-Type"] = "application/json; charset=UTF-8"
        elif serialization == "text":
            response["Content-Type"] = "text/plain; charset=UTF-8"
        else:
            raise ValueError("Unknown serialization format '%s'" % serialization)

    if settings.DEBUG or getattr(settings, "TEST", False):
        response["Date"] = format_date_time(time())

    if not response.has_header("Content-Length"):
        _base_content_is_iter = getattr(response, '_base_content_is_iter', None)
        if (_base_content_is_iter is not None and not _base_content_is_iter):
            response["Content-Length"] = len(response.content)
        else:
            if not (response.has_header('Content-Type') and
                    response['Content-Type'].startswith('multipart/byteranges')):
                # save response content from been consumed if it is an iterator
                response._container, data = itertools.tee(response._container)
                # BUG FIX: the old code used len(str(data)), which measured
                # the repr of the tee object itself, not the body. Sum the
                # chunk lengths instead.
                response["Content-Length"] = sum(len(chunk) for chunk in data)

    cache.add_never_cache_headers(response)
    # Fix Vary and Cache-Control Headers. Issue: #3448
    cache.patch_vary_headers(response, ('X-Auth-Token',))
    cache.patch_cache_control(response, no_cache=True, no_store=True,
                              must_revalidate=True)
def api_state(request):
    """Return a JSON snapshot of active and completed sessions.

    The response carries never-cache headers so clients always see
    fresh data.
    """
    payload = {
        'active_sessions': [{
            'id': s.id,
            'datecreated': s.datecreated,
            'lat': s.lat,
            'lon': s.lon,
            'datelive': s.datelive,
            'datefinished': s.datefinished,
            # Non-public topics are masked with a placeholder name.
            'topic__name': s.topic.public and s.topic.name or 'something',
            'parsed_geocode': s.parsed_geocode()
        } for s in get_active_sessions()],
        'completed_sessions': [{
            'id': s.id,
            'datecreated': s.datecreated,
            'lat': s.lat,
            'lon': s.lon,
            'datelive': s.datelive,
            'datefinished': s.datefinished,
            'topic__name': s.topic.name,
            'parsed_geocode': s.parsed_geocode(),
            'get_absolute_url': s.get_absolute_url(),
        } for s in get_completed_sessions()],
    }
    # However we don't want the client to cache this page ever
    response = HttpResponse(json.dumps(payload, cls=DjangoJSONEncoder),
                            content_type='application/json')
    add_never_cache_headers(response)
    return response
def update_response_headers(request, response):
    """Normalize Content-Type/Date/Content-Length and force no-cache headers.

    Raises:
        ValueError: for an unknown serialization format.
    """
    if not getattr(response, "override_serialization", False):
        content_types = {
            "xml": "application/xml; charset=UTF-8",
            "json": "application/json; charset=UTF-8",
            "text": "text/plain; charset=UTF-8",
        }
        serialization = request.serialization
        content_type = content_types.get(serialization)
        if content_type is None:
            raise ValueError("Unknown serialization format '%s'" % serialization)
        response["Content-Type"] = content_type

    if settings.DEBUG or getattr(settings, "TEST", False):
        response["Date"] = format_date_time(time())

    # Streaming responses have no .content; only plain HttpResponse gets a
    # computed Content-Length.
    if isinstance(response, HttpResponse) and \
            not response.has_header("Content-Length"):
        response["Content-Length"] = len(response.content)

    cache.add_never_cache_headers(response)
    # Fix Vary and Cache-Control Headers. Issue: #3448
    cache.patch_vary_headers(response, ('X-Auth-Token',))
    cache.patch_cache_control(response, no_cache=True, no_store=True,
                              must_revalidate=True)
def home(request):
    """Render the painter page sized to the session's current picture.

    A per-session random id names the image file under static/img; if the
    file exists its dimensions size the canvas, otherwise a default
    800x600 canvas is used.
    """
    if request.method == "GET":
        try:
            picture_id = request.session['id_picture']
        except KeyError:
            # First visit in this session: mint a random picture id.
            # (Was a bare `except:`; only a missing key is expected here.)
            picture_id = randomword(40)
            request.session['id_picture'] = picture_id
        urls = '/static/img/' + str(picture_id) + '.jpg'
        canvasw = 800
        canvash = 600
        path = os.path.join(os.path.dirname(__file__), 'static').replace('\\', '/')
        path = os.path.join(path, 'img').replace('\\', '/')
        name = str(request.session['id_picture']) + '.jpg'
        path = os.path.join(path, name).replace('\\', '/')
        try:
            img = Image.open(path)
            canvasw, canvash = img.size
        except (IOError, OSError):
            # Image missing or unreadable: keep the default canvas size.
            # (Was a bare `except:`; PIL raises IOError for bad images.)
            canvasw = 800
            canvash = 600
        d = {'url_img': urls, 'canvasw': canvasw, 'canvash': canvash}
        t = get_template("painter.html")
        c = Context(d)
        html = t.render(c)
        response = HttpResponse(html)
        add_never_cache_headers(response)
        return response
    # NOTE(review): non-GET requests implicitly return None (kept as-is).
def get(self, request, slug):
    """Serve the chapter editor, or (with ``?all``) the chapters as JSON."""
    event = self.get_event(slug, request)
    if not self.can_view_event(event, request):
        return self.cant_view_event(event, request)
    if not self.can_edit_event(event, request):
        return self.cant_edit_event(event, request)

    if request.GET.get("all"):
        rows = Chapter.objects.filter(event=event, is_active=True)
        chapters = []
        for row in rows.select_related("user"):
            owner = row.user
            chapters.append({
                "timestamp": row.timestamp,
                "text": row.text,
                "user": {
                    "email": owner.email,
                    "first_name": owner.first_name,
                    "last_name": owner.last_name,
                },
                "js_date_tag": js_date(row.modified),
            })
        json_response = http.JsonResponse({"chapters": chapters})
        add_never_cache_headers(json_response)
        return json_response

    video = get_video_tagged(event, request)
    return render(request, self.template_name, {"event": event, "video": video})
def __init__(self, content, *args, **kwargs):
    """Build a never-cached JSON response from *content*.

    If *content* exposes a callable ``dict`` attribute (a model-like
    object), that is called to obtain the serializable payload; otherwise
    *content* is serialized as-is.
    """
    # getattr + callable replaces the old O(n) `'dict' in dir(content)`
    # scan with the idiomatic attribute lookup.
    to_dict = getattr(content, 'dict', None)
    if callable(to_dict):
        dict_content = to_dict()
    else:
        dict_content = content
    super(JsonResponse, self).__init__(json.dumps(dict_content),
                                       content_type='application/json',
                                       *args, **kwargs)
    add_never_cache_headers(self)
def get(self, request, *args, **kwargs):
    """Return the unit choices for the selected response-variable parameter.

    ``parent_value`` (an RVP id) selects the unit set; with no parent value
    an empty placeholder choice is returned.

    Raises:
        ValueError: if ``parent_value`` is not a recognized parameter id.
    """
    # field = request.GET.get("field")
    parent_value = request.GET.get("parent_value")
    if parent_value:
        parent_value_int = int(parent_value)

    rvu_desc = dict(RESP_VAR_UNITS)
    if not parent_value:
        choices = (('', '--------'), )
    elif parent_value_int == RVP.BASE_SHEAR:
        choices = ((RVU.KN, rvu_desc[str(RVU.KN)]), )
    elif parent_value_int == RVP.BASE_BENDING_MOMENT:
        choices = ((RVU.KNM, rvu_desc[str(RVU.KNM)]), )
    elif parent_value_int == RVP.SPECTRAL_ACCELERATION:
        choices = ((RVU.G, rvu_desc[str(RVU.G)]),
                   (RVU.CMS2, rvu_desc[str(RVU.CMS2)]),
                   (RVU.MS2, rvu_desc[str(RVU.MS2)]))
    elif parent_value_int == RVP.BASE_SHEAR_COEF:
        choices = ((RVU.NO_DIM, rvu_desc[str(RVU.NO_DIM)]), )
    else:
        raise ValueError

    # `mimetype` was removed in Django 1.7; `content_type` is the supported
    # keyword (and matches the sibling JSON views in this file).
    response = HttpResponse(
        json.dumps(choices, cls=DjangoJSONEncoder),
        content_type='application/javascript'
    )
    add_never_cache_headers(response)
    return response
def process_request(self, request):
    """Log in a user identified by a signed URL token, then strip the token.

    Whenever a token is present, the user is redirected to the token-free
    URL; an invalid, foreign, or expired token still strips itself but
    changes nothing else. Returns None when no token is given.
    """
    token = request.GET.get(app_settings.KEY)
    if not token:
        return

    stripped_path = strip_token(request.get_full_path())
    fallback = redirect(stripped_path)
    try:
        user_id = int(token.split(':', 1)[0])
        # Only change user if necessary. We strip the token in any case.
        if request.user.id == user_id:
            return fallback
        user = User.objects.get(id=user_id)
    except (ValueError, User.DoesNotExist):
        return fallback

    try:
        TimestampSigner(salt=get_user_salt(user)).unsign(
            token,
            max_age=app_settings.MAX_AGE,
        )
    except BadSignature:
        return fallback

    response = self.render(request, user, token, stripped_path)
    add_never_cache_headers(response)
    return response
def fudge_headers(response, stats):
    """Alter cache headers. Don't cache content where data could be missing."""
    if stats:
        one_week = 60 * 60 * 24 * 7
        patch_cache_control(response, max_age=one_week)
    else:
        add_never_cache_headers(response)
def get(self, request, *args, **kwargs):
    """Return the unit choices for the selected intensity-measure type.

    ``parent_value`` (an IMT id) selects the unit set; with no parent value
    an empty placeholder choice is returned.

    Raises:
        ValueError: if ``parent_value`` is not a recognized IMT id.
    """
    parent_value = request.GET.get("parent_value")
    if parent_value:
        parent_value_int = int(parent_value)

    if not parent_value:
        choices = (('', '--------'), )
    elif parent_value_int in (IMT.PGA, IMT.SAT):
        choices = ((IMU.G, 'g'), (IMU.CM_S2, 'cm/s^2'), (IMU.M_S2, 'm/s^2'))
    elif parent_value_int in (IMT.PGV, IMT.IA):
        choices = ((IMU.CM_S, 'cm/s'), (IMU.M_S, 'm/s'))
    elif parent_value_int in (IMT.PGD, IMT.SDT):
        choices = ((IMU.CM, 'cm'), (IMU.M, 'm'))
    elif parent_value_int == IMT.RSD:
        choices = ((IMU.S, 's'), )
    elif parent_value_int == IMT.CAV:
        choices = ((IMU.GS, 'g-s'), )
    elif parent_value_int == IMT.MMI:
        choices = ((IMU.ROMAN, 'Roman numbers'), )
    else:
        raise ValueError

    # `mimetype` was removed in Django 1.7; `content_type` is the supported
    # keyword argument.
    response = HttpResponse(
        json.dumps(choices, cls=DjangoJSONEncoder),
        content_type='application/javascript'
    )
    add_never_cache_headers(response)
    return response
def get_maintenance_response(request):
    """
    Return a '503 Service Unavailable' maintenance response.
    """
    if settings.MAINTENANCE_MODE_REDIRECT_URL:
        return redirect(settings.MAINTENANCE_MODE_REDIRECT_URL)

    context = {}
    if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT:
        try:
            context_func = import_string(
                settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT)
        except ImportError:
            raise ImproperlyConfigured(
                'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT '
                'is not a valid function path.'
            )
        context = context_func(request=request)

    # Django < 1.8 expects a RequestContext instance instead of a plain dict.
    if django.VERSION < (1, 8):
        render_kwargs = {'context_instance': RequestContext(request, context)}
    else:
        render_kwargs = {'context': context}

    response = render(request, settings.MAINTENANCE_MODE_TEMPLATE,
                      status=settings.MAINTENANCE_MODE_STATUS_CODE,
                      **render_kwargs)
    response['Retry-After'] = settings.MAINTENANCE_MODE_RETRY_AFTER
    add_never_cache_headers(response)
    return response
def dispatch_api(self, api_request):
    """Route an API request to the matching HTTP-method handler.

    Initializes per-request state, runs the permission check, lets the
    handler produce either a response or a Link (rendered into a response),
    and disables caching unless the endpoint declares itself cacheable.
    """
    method = api_request.method.lower()
    if method in self.http_method_names:
        handler = getattr(self, method, self.handle_link_submission)
    else:
        handler = self.http_method_not_allowed

    self.api_request = api_request
    self.args = api_request.url_args
    self.kwargs = api_request.url_kwargs
    self.initialize_state()
    assert self.state is not None
    assert self.resource != self
    assert self.resource.state is not None
    assert self.common_state is not None  # resource.state is not initialized!
    self.pre_dispatch()
    self.common_state.update(self.get_common_state_data())

    permission_response = self.api_permission_check(api_request)
    if permission_response is None:
        response_or_link = handler(api_request)
    else:
        response_or_link = permission_response

    if isinstance(response_or_link, Link):
        # TODO: TemplateResponse with a link
        response = self.generate_response(response_or_link)
    else:
        response = response_or_link

    if not self.cacheable:
        add_never_cache_headers(response)
    return response
def process_response(self, request, response):
    """Disable caching for editable CMS pages and track staff admin activity.

    Marks the response never-cache when the toolbar is in edit mode or any
    placeholder opts out of caching, and records the latest admin LogEntry
    pk in the session for staff users.
    """
    if not self.is_cms_request(request):
        return response

    from django.utils.cache import add_never_cache_headers
    if ((hasattr(request, 'toolbar') and request.toolbar.edit_mode) or
            not all(ph.cache_placeholder
                    for ph in getattr(request, 'placeholders', ()))):
        add_never_cache_headers(response)

    if hasattr(request, 'user') and request.user.is_staff and \
            response.status_code != 500:
        try:
            pk = LogEntry.objects.filter(
                user=request.user,
                action_flag__in=(ADDITION, CHANGE)
            ).only('pk').order_by('-pk')[0].pk
            if hasattr(request, 'cms_latest_entry') and \
                    request.cms_latest_entry != pk:
                # BUG FIX: store the pk we just computed. The old code issued
                # a second, *unordered* query and stored its first row's pk,
                # which was redundant and could disagree with the ordered
                # lookup above.
                request.session['cms_log_latest'] = pk
            # If there were no LogEntries, just don't touch the session.
            # Note that in the case of a user logging-in as another user,
            # request may have a cms_latest_entry attribute, but there are no
            # LogEntries for request.user.
        except IndexError:
            pass
    return response
def get(self, request, *args, **kwargs):
    """Override default get function to use token if there is one to
    retrieve object. If a subclass should use their own GET implementation,
    token_from_kwargs should be called if that detail view should be
    accessible via token."""
    self.object = self.get_object()
    allow_anonymous = kwargs.get("allow_anonymous", False)

    # Redirect to the canonical URL only when redirection is enabled and the
    # view is not being served anonymously — and, obviously, only when the
    # requested path is wrong.
    if self.redirect_correct_path and not allow_anonymous:
        if self.request.path != self.object.get_absolute_url():
            return HttpResponsePermanentRedirect(self.object.get_absolute_url())

    context = self.get_context_data(object=self.object)
    response = self.render_to_response(context)

    published = self.object.published
    if published is None or published > timezone.now():
        # Unpublished article: only staff (or explicitly anonymous-allowed
        # views) may see it; everyone else goes to login or a 404.
        if not request.user.is_staff and not allow_anonymous:
            response = redirect_unpublished_to_login_or_404(
                request=request,
                next_url=self.object.get_absolute_url(),
                next_params=request.GET)
        # Never cache unpublished articles
        add_never_cache_headers(response)
    else:
        response["Vary"] = "Accept-Encoding"
    return response
def process_request(self, request):
    """Redirect logged-in users to the ToS confirm page until they agree.

    Skips mutating requests, ajax, the confirm page itself, and anonymous
    sessions; agreement status is cached to avoid hitting the database on
    every request.
    """
    # Don't get in the way of any mutating requests
    if request.method != 'GET':
        return None
    # Ignore ajax requests
    if request.is_ajax():
        return None
    # Don't redirect users when they're trying to get to the confirm page
    if request.path_info == tos_check_url:
        return None
    # If the user doesn't have a user ID, ignore them - they're anonymous
    if not request.session.get(session_key, None):
        return None

    # Grab the user ID from the session so we avoid hitting the database
    # for the user object.
    # NOTE: We use the user ID because it's not user-settable and it won't
    # ever change (usernames and email addresses can change)
    user_id = request.session['_auth_user_id']
    user_auth_backend = request.session['_auth_user_backend']

    # Get the cache prefix
    key_version = cache.get('django:tos:key_version')

    # Skip if the user is allowed to skip - for instance, if the user is an
    # admin or a staff member
    if cache.get('django:tos:skip_tos_check:{0}'.format(str(user_id)), False,
                 version=key_version):
        return None

    # Ping the cache for the user agreement
    user_agreed = cache.get('django:tos:agreed:{0}'.format(str(user_id)), None,
                            version=key_version)
    if user_agreed is None:
        # Cache miss: consult the database and remember the answer.
        user_agreed = UserAgreement.objects.filter(
            user__id=user_id, terms_of_service__active=True).exists()
        cache.set('django:tos:agreed:{0}'.format(user_id), user_agreed,
                  version=key_version)

    if user_agreed:
        return None

    # Confirm view uses these session keys. Non-middleware flow sets them in
    # login view, so we need to set them here.
    request.session['tos_user'] = user_id
    request.session['tos_backend'] = user_auth_backend

    response = HttpResponseRedirect('{0}?{1}={2}'.format(
        tos_check_url,
        REDIRECT_FIELD_NAME,
        request.path_info,
    ))
    add_never_cache_headers(response)
    return response
def get(self, request, *args, **kwargs):
    """Return the unit choices for the selected engineering-demand parameter.

    ``parent_value`` (an EDP id) selects the unit set; with no parent value
    an empty placeholder choice is returned.

    Raises:
        ValueError: if ``parent_value`` is not a recognized EDP id.
    """
    # field = request.GET.get("field")
    parent_value = request.GET.get("parent_value")
    if parent_value:
        parent_value_int = int(parent_value)

    if not parent_value:
        choices = (('', '--------'), )
    elif parent_value_int in (EDP.INTERSTOREY_DRIFT, EDP.GLOBAL_DRIFT):
        choices = ((EDU.PURE, 'Dimensionless'), )
    elif parent_value_int in (EDP.LATERAL_ROOF_DISPLACEMENT,
                              EDP.SPECTRAL_DISPLACEMENT):
        choices = ((EDU.CM, 'cm'), (EDU.M, 'm'))
    elif parent_value_int == EDP.CHORD_ROTATION:
        choices = ((EDU.RAD, 'rad'), )
    elif parent_value_int == EDP.CURVATURE:
        choices = ((EDU.RAD_KM, 'rad/km'), (EDU.RAD_M, 'rad/m'))
    else:
        raise ValueError

    # `mimetype` was removed in Django 1.7; `content_type` is the supported
    # keyword argument.
    response = HttpResponse(
        json.dumps(choices, cls=DjangoJSONEncoder),
        content_type='application/javascript'
    )
    add_never_cache_headers(response)
    return response
def dispatch(self, request, *args, **kwargs):
    """Serve the page from cache when possible; otherwise render and cache it."""
    cache_key = self.get_page_cache_key(request, request.method)
    cache = get_cache(self.cache_alias)

    cached = cache.get(cache_key, None)
    if cached is not None:
        return cached

    # Cache miss: render the response normally.
    response = super(CachePageMixin, self).dispatch(request, *args, **kwargs)
    # Apply cache headers.
    patch_response_headers(response, self.cache_timeout)
    if self.cache_ensure_never_cache:
        # Override max-age if needed.
        add_never_cache_headers(response)

    # If cache timeout is set, cache response.
    if self.cache_timeout is not None:
        if hasattr(response, 'render') and callable(response.render):
            # TemplateResponse: only store it once rendering has finished.
            response.add_post_render_callback(
                lambda rendered: cache.set(cache_key, rendered,
                                           self.cache_timeout)
            )
        else:
            cache.set(cache_key, response, self.cache_timeout)
    return response
def busca_processo(request, processo_nro):
    """Look up a lawsuit by number and return its details as JSON.

    The payload includes the case's subjects (assuntos) and dated status
    entries (situacoes); an empty list is serialized when nothing matches.
    """
    processo_json = []
    assuntos_list = []
    situacoes_list = []
    try:
        if request.method == 'GET':
            processo = get_object_or_404(Processo, numero__exact=processo_nro)
            assuntos = Assunto.objects.filter(proc_id=processo)
            situacoes = Situacao.objects.filter(proc_id=processo)
            for assunto in assuntos:
                assuntos_list.append(assunto.assunto)
            for situacao in situacoes:
                situacoes_list.append([str(situacao.data), situacao.situacao])
            # Plain dict literal: the old dict(({...})) wrapper was redundant.
            processo_json = {
                'processo': processo.numero,
                'id_md5': processo.id_md5,
                'orgao_julgador': processo.orgao_julgador,
                'justica_gratuita': processo.justica_gratuita,
                'status': processo.status,
                'assuntos': assuntos_list,
                'situacoes': situacoes_list,
            }
    except ObjectDoesNotExist:
        # NOTE(review): get_object_or_404 raises Http404, not
        # ObjectDoesNotExist, so this handler is unlikely to fire; kept for
        # compatibility with direct ORM lookups. (The old unused
        # `processo = None` assignment was dropped.)
        pass
    resp = HttpResponse(json.dumps(processo_json),
                        content_type="application/json")
    add_never_cache_headers(resp)
    return resp
def set_blog_page_cache(response):
    """Cache a rendered blog page unless the current request forbids it.

    Mirrors django CMS page caching: caching is skipped (and the response
    marked never-cache) for edit mode, visible toolbars, authenticated
    users, or placeholders that opt out. Always returns the response.
    """
    from django.core.cache import cache
    if not get_cms_setting('PAGE_CACHE'):
        return response
    request = response._request

    save_cache = True
    for placeholder in getattr(request, 'placeholders', []):
        if not placeholder.cache_placeholder:
            save_cache = False
            break
    if hasattr(request, 'toolbar'):
        if request.toolbar.edit_mode or request.toolbar.show_toolbar:
            save_cache = False
    if request.user.is_authenticated():
        save_cache = False

    if not save_cache:
        add_never_cache_headers(response)
        return response
    else:
        version = _get_cache_version()
        ttl = get_cms_setting('CACHE_DURATIONS')['content']
        cache.set(
            _blog_page_cache_key(request),
            (response.content, response._headers),
            ttl,
            version=version
        )
        # See note in invalidate_cms_page_cache()
        _set_cache_version(version)
    # BUG FIX: the caching branch previously fell through and returned None;
    # return the response so every path yields it.
    return response
def process_response(self, request, response):
    """Run the parent middleware's post-processing, then force no-cache."""
    processed = super(CacheMiddleware, self).process_response(request, response)
    # never cache headers + ETag
    add_never_cache_headers(processed)
    return processed
def process_response(self, request, response):
    """Sets the cache, if needed."""
    # never cache headers + ETag
    add_never_cache_headers(response)

    if not getattr(request, '_cache_update_cache', False):
        # We don't need to update the cache, just return.
        return response
    if request.method != 'GET':
        # This is a stronger requirement than above. It is needed
        # because of interactions between this middleware and the
        # HTTPMiddleware, which throws the body of a HEAD-request
        # away before this middleware gets a chance to cache it.
        return response
    if response.status_code != 200:
        return response

    # use the precomputed cache_key
    if request._cache_middleware_key:
        cache_key = request._cache_middleware_key
    else:
        cache_key = learn_cache_key(request, response, self.cache_timeout,
                                    self.key_prefix)
    # include the orig_time information within the cache
    cache.set(cache_key, (time.time(), response), self.cache_timeout)
    return response
def get(self, request, subsection_id, *args, **kwargs):
    """Return the subsection's skip rules as JSON (never cached)."""
    data = SkipRule.objects.filter(
        subsection_id=subsection_id).select_subclasses()
    # List comprehension instead of map(): on Python 3 map() returns a lazy
    # iterator that json.dumps cannot serialize.
    response_data = [rule.to_dictionary(request.user) for rule in data]
    response = HttpResponse(json.dumps(response_data),
                            content_type="application/json", status=200)
    add_never_cache_headers(response)
    return response
def get(self, request, *args, **kwargs):
    """Return all subsections of a section as JSON (never cached)."""
    section_id = kwargs.get("section_id")
    subsections = self._get_subsections(section_id, request)
    # List comprehension instead of map(): on Python 3 map() returns a lazy
    # iterator that json.dumps cannot serialize.
    subsections_dict = [subsection.to_dict() for subsection in subsections]
    response = HttpResponse(json.dumps(subsections_dict),
                            content_type="application/json", status=200)
    add_never_cache_headers(response)
    return response
def _wrapped_view_func(request, *args, **kwargs):
    """Call the wrapped view and mark its response as never-cacheable."""
    response = view_func(request, *args, **kwargs)
    # Although rare, it is possible for a view to return None (e.g. the
    # django.contrib.admin.sites.AdminSite.login view in one corner-case).
    # Compare against None explicitly rather than relying on truthiness, so
    # an otherwise-falsy response object still gets the no-cache headers.
    if response is not None:
        add_never_cache_headers(response)
    return response
def process_response(self, request, response):
    """Disable caching for authenticated users outside the excluded paths."""
    if hasattr(request, 'user') and request.user.is_authenticated():
        matches_excluded = any(
            path_re.search(request.path) is not None
            for path_re in self.EXCLUDED_PATHS)
        if not matches_excluded:
            add_never_cache_headers(response)
    return response
def process_request(self, request):
    """Load signed data from a URL token into ``request.tempus``.

    When a token is present, redirect to the token-free URL (never cached)
    unless one of the configured callbacks returns its own response. Token
    data is merged over any tempus data already on the request.
    """
    token = request.GET.get(self.param_name)
    if not token:
        return

    clean_url = URLObject(request.get_full_path()).del_query_param(
        self.param_name)
    response = redirect(unicode(clean_url))

    try:
        token_data = tempus_loads(token, max_age=self.max_age)
        existing = getattr(request, 'tempus', None)
        if existing:
            merged = existing.copy()
            merged.update(token_data)
            request.tempus = merged
        else:
            request.tempus = token_data
    except SignatureExpired:
        override = self.__process_func(request, 'expired_func')
        if override:
            return override
    except BadSignature:
        override = self.__process_func(request, 'unsuccess_func')
        if override:
            return override
    else:
        override = self.__process_func(request, 'success_func')
        if override:
            return override

    add_never_cache_headers(response)
    return response
def get_net_compare_data_list(request):
    """Build per-hour counts of NetBehaviour records for one protocol.

    Buckets records into hourly slots over (at least) the last 30 days and
    returns them as a JSON list containing one "[ms_timestamp,count],..."
    string suitable for Highcharts.
    NOTE(review): Python 2 code (print statements, `mimetype=`); also, the
    inner scan over db_datetime is O(hours * records) — a candidate for a
    pre-sorted single pass.
    """
    print 'in get_net_compare_data_list'
    datas =[]
    columnIndexNameMap = {0: 'datetime'}
    # Protocol to chart, taken straight from the query string.
    appprotc = request.GET["appprotc"]
    print appprotc
    earliest_time = NetBehaviour.objects.order_by("datetime")[0].datetime
    latest_time = NetBehaviour.objects.order_by("-datetime")[0].datetime
    end_days = (latest_time-earliest_time).days
    # check the days between, make sure the window covers at least 30 days
    if end_days < 30:
        earliest_time = earliest_time
        print earliest_time,'get_net_compare_data_list 30 '
        end_days = 30
    else:
        # Clamp the window to the last 30 days before the newest record.
        earliest_time = latest_time + datetime.timedelta(hours=-30*24)
        print earliest_time,'get_net_compare_data_list 60'
    # fill the time with 0 and add num to the one that exists
    filterdict = dict()
    filterdict["appprotc"] = appprotc
    # Non-superusers only see their own records.
    if User.objects.get(username = request.user).is_superuser == False:
        userinfo = Userinfo.objects.get(user=request.user)
        filterdict["user_id"] =ObjectId(userinfo.id)
    db_datetime = get_highcharts_records(NetBehaviour,columnIndexNameMap,filterdict,"datetime")
    appprotcdict = {}
    # One bucket per hour; keys are millisecond timestamps on hour boundaries.
    for i in range((latest_time - earliest_time).seconds/3600 + 1 + end_days*24):
        currtime = earliest_time + datetime.timedelta(hours=i)
        totime = earliest_time + datetime.timedelta(hours=i+1)
        secstimestr = datestrtsecs(currtime.strftime("%Y-%m-%d %H:00:00"))*1000
        totimestr = datestrtsecs(totime.strftime("%Y-%m-%d %H:00:00"))*1000
        appprotcdict[secstimestr] = 0
        # Count the records whose timestamp falls inside this hour bucket.
        for cmp_time in db_datetime:
            icmp_time = int(cmp_time[0])
            if icmp_time >= secstimestr and icmp_time <= totimestr:
                appprotcdict[secstimestr] += 1
    # change the data to the "[t,count]" style Highcharts expects
    strlist = list()
    for i in sorted(appprotcdict):
        tmpstr = "[%s,%d]"%(i,appprotcdict[i])
        strlist.append(tmpstr)
    appprotcdata = ",".join(strlist)
    datas.append(appprotcdata)
    response = HttpResponse(simplejson.dumps(datas), mimetype='application/json')
    # prevent client-side caching
    add_never_cache_headers(response)
    return response
def document(request, document_slug, document_locale):
    """
    View a wiki document.

    Resolves the document for (locale, slug) — handling deletion logs,
    locale fallback, and create-page redirects — then honors redirect
    pages, applies content experiments, renders either raw or templated
    HTML, and finishes by fixing cache/Vary headers.
    """
    fallback_reason = None
    slug_dict = split_slug(document_slug)

    # Is there a document at this slug, in this locale?
    doc, fallback_reason = _get_doc_and_fallback_reason(
        document_locale, document_slug)

    if doc is None:
        # Possible the document once existed, but is now deleted.
        # If so, show that it was deleted.
        deletion_log_entries = DocumentDeletionLog.objects.filter(
            locale=document_locale, slug=document_slug)
        if deletion_log_entries.exists():
            return _document_deleted(request, deletion_log_entries)

        # We can throw a 404 immediately if the request type is HEAD.
        # TODO: take a shortcut if the document was found?
        if request.method == 'HEAD':
            raise Http404

        # Check if we should fall back to default locale.
        fallback_doc, fallback_reason, redirect_url = _default_locale_fallback(
            request, document_slug, document_locale)
        if fallback_doc is not None:
            doc = fallback_doc
            if redirect_url is not None:
                return redirect(redirect_url)
        else:
            # If a Document is not found, we may 404 immediately based on
            # request parameters.
            if (any([request.GET.get(param, None)
                     for param in ('raw', 'include', 'nocreate')]) or
                    not request.user.is_authenticated()):
                raise Http404

            # The user may be trying to create a child page; if a parent exists
            # for this document, redirect them to the "Create" page
            # Otherwise, they could be trying to create a main level doc.
            create_url = _document_redirect_to_create(document_slug,
                                                      document_locale,
                                                      slug_dict)
            response = redirect(create_url)
            add_never_cache_headers(response)
            return response

    # We found a Document. Now we need to figure out how we're going
    # to display it.

    # If we're a redirect, and redirecting hasn't been disabled, redirect.

    # Obey explicit redirect pages:
    # Don't redirect on redirect=no (like Wikipedia), so we can link from a
    # redirected-to-page back to a "Redirected from..." link, so you can edit
    # the redirect.
    redirect_url = (None if request.GET.get('redirect') == 'no'
                    else doc.get_redirect_url())

    if redirect_url and redirect_url != doc.get_absolute_url():
        url = urlparams(redirect_url, query_dict=request.GET)
        # TODO: Re-enable the link in this message after Django >1.5 upgrade
        # Redirected from <a href="%(url)s?redirect=no">%(url)s</a>
        messages.add_message(
            request, messages.WARNING,
            mark_safe(
                ugettext(u'Redirected from %(url)s') %
                {"url": request.build_absolute_uri(doc.get_absolute_url())}),
            extra_tags='wiki_redirect')
        return HttpResponsePermanentRedirect(url)

    # Read some request params to see what we're supposed to do.
    rendering_params = {}
    for param in ('raw', 'summary', 'include', 'edit_links'):
        rendering_params[param] = request.GET.get(param, False) is not False
    rendering_params['section'] = request.GET.get('section', None)
    rendering_params['render_raw_fallback'] = False

    # Are we in a content experiment?
    original_doc = doc
    doc, exp_params = _apply_content_experiment(request, doc)
    rendering_params['experiment'] = exp_params

    # Get us some HTML to play with.
    rendering_params['use_rendered'] = (kumascript.should_use_rendered(
        doc, request.GET))
    doc_html, ks_errors, render_raw_fallback = _get_html_and_errors(
        request, doc, rendering_params)
    rendering_params['render_raw_fallback'] = render_raw_fallback

    # Start parsing and applying filters.
    if doc.show_toc and not rendering_params['raw']:
        toc_html = doc.get_toc_html()
    else:
        toc_html = None
    doc_html = _filter_doc_html(request, doc, doc_html, rendering_params)

    if rendering_params['raw']:
        response = _document_raw(doc_html)
    else:
        # Get the SEO summary
        seo_summary = doc.get_summary_text()

        # Get the additional title information, if necessary.
        seo_parent_title = _get_seo_parent_title(original_doc, slug_dict,
                                                 document_locale)

        # Retrieve pre-parsed content hunks
        quick_links_html = doc.get_quick_links_html()
        zone_subnav_html = doc.get_zone_subnav_html()
        body_html = doc.get_body_html()

        # Record the English slug in Google Analytics,
        # to associate translations
        if original_doc.locale == 'en-US':
            en_slug = original_doc.slug
        elif original_doc.parent_id and original_doc.parent.locale == 'en-US':
            en_slug = original_doc.parent.slug
        else:
            en_slug = ''

        share_text = ugettext('I learned about %(title)s on MDN.') % {
            "title": doc.title
        }

        contributors = doc.contributors
        contributors_count = len(contributors)
        has_contributors = contributors_count > 0
        other_translations = original_doc.get_other_translations(
            fields=['title', 'locale', 'slug', 'parent'])

        # Bundle it all up and, finally, return.
        context = {
            'document': original_doc,
            'document_html': doc_html,
            'toc_html': toc_html,
            'quick_links_html': quick_links_html,
            'zone_subnav_html': zone_subnav_html,
            'body_html': body_html,
            'contributors': contributors,
            'contributors_count': contributors_count,
            'contributors_limit': 6,
            'has_contributors': has_contributors,
            'fallback_reason': fallback_reason,
            'kumascript_errors': ks_errors,
            'macro_sources': (kumascript.macro_sources(
                force_lowercase_keys=True) if ks_errors else None),
            'render_raw_fallback': rendering_params['render_raw_fallback'],
            'seo_summary': seo_summary,
            'seo_parent_title': seo_parent_title,
            'share_text': share_text,
            'search_url': get_search_url_from_referer(request) or '',
            'analytics_page_revision': doc.current_revision_id,
            'analytics_en_slug': en_slug,
            'content_experiment': rendering_params['experiment'],
            'other_translations': other_translations,
        }
        response = render(request, 'wiki/document.html', context)

    if ks_errors or request.user.is_authenticated():
        add_never_cache_headers(response)

    # We're doing this to prevent any unknown intermediate public HTTP caches
    # from erroneously caching without considering cookies, since cookies do
    # affect the content of the response. The primary CDN is configured to
    # cache based on a whitelist of cookies.
    patch_vary_headers(response, ('Cookie', ))

    return _add_kuma_revision_header(doc, response)
def get(self, request, *args, **kwargs):
    """List the collection, explicitly disabling client-side caching."""
    # TODO: we must find a better way to invalidate the cache.
    # Simply adding a no-cache header eventually decreases the performance
    # dramatically.
    listing = self.list(request, *args, **kwargs)
    add_never_cache_headers(listing)
    return listing
def wiki_document(request, document_slug, document_locale):
    """
    View a wiki document.

    Resolves (slug, locale) to a Document and renders it, handling along
    the way: soft-deleted documents, default-locale fallback, explicit
    redirect pages, raw/section rendering, and content experiments.
    """
    slug_dict = split_slug(document_slug)

    # Is there a document at this slug, in this locale?
    doc, fallback_reason = _get_doc_and_fallback_reason(
        document_locale, document_slug)

    if doc is None:
        # Possible the document once existed, but is now deleted.
        # If so, show that it was deleted.
        deletion_log_entries = DocumentDeletionLog.objects.filter(
            locale=document_locale, slug=document_slug)
        if deletion_log_entries.exists():
            # Show deletion log and restore / purge for soft-deleted docs
            deleted_doc = Document.deleted_objects.filter(
                locale=document_locale, slug=document_slug)
            if deleted_doc.exists():
                return _document_deleted(request, deletion_log_entries)

        # We can throw a 404 immediately if the request type is HEAD.
        # TODO: take a shortcut if the document was found?
        if request.method == "HEAD":
            raise Http404

        # Check if we should fall back to default locale.
        fallback_doc, fallback_reason, redirect_url = _default_locale_fallback(
            request, document_slug, document_locale)
        if fallback_doc is not None:
            doc = fallback_doc
            if redirect_url is not None:
                return redirect(redirect_url)
        else:
            # If a Document is not found, we may 404 immediately based on
            # request parameters.
            if (any([
                    request.GET.get(param, None)
                    for param in ("raw", "include", "nocreate")
            ]) or not request.user.is_authenticated):
                raise Http404

            # The user may be trying to create a child page; if a parent
            # exists for this document, redirect them to the "Create" page
            # Otherwise, they could be trying to create a main level doc.
            create_url = _document_redirect_to_create(document_slug,
                                                      document_locale,
                                                      slug_dict)
            response = redirect(create_url)
            add_never_cache_headers(response)
            return response

    # We found a Document. Now we need to figure out how we're going
    # to display it.

    # If we're a redirect, and redirecting hasn't been disabled, redirect.
    # Obey explicit redirect pages:
    # Don't redirect on redirect=no (like Wikipedia), so we can link from a
    # redirected-to-page back to a "Redirected from..." link, so you can edit
    # the redirect.
    redirect_url = (None if request.GET.get("redirect") == "no" else
                    doc.get_redirect_url())

    if redirect_url and redirect_url != doc.get_absolute_url():
        url = urlparams(redirect_url, query_dict=request.GET)
        # TODO: Re-enable the link in this message after Django >1.5 upgrade
        # Redirected from <a href="%(url)s?redirect=no">%(url)s</a>
        messages.add_message(
            request,
            messages.WARNING,
            mark_safe(
                gettext("Redirected from %(url)s") %
                {"url": request.build_absolute_uri(doc.get_absolute_url())}),
            extra_tags="wiki_redirect",
        )
        return HttpResponsePermanentRedirect(url)

    # Read some request params to see what we're supposed to do.
    rendering_params = {}
    for param in ("raw", "summary", "include", "edit_links"):
        # Presence of the query param (any value) turns the flag on.
        rendering_params[param] = request.GET.get(param, False) is not False
    rendering_params["section"] = request.GET.get("section", None)
    rendering_params["render_raw_fallback"] = False

    # Are we in a content experiment?
    original_doc = doc
    doc, exp_params = _apply_content_experiment(request, doc)
    rendering_params["experiment"] = exp_params

    # Get us some HTML to play with.
    rendering_params["use_rendered"] = kumascript.should_use_rendered(
        doc, request.GET)
    doc_html, ks_errors, render_raw_fallback = _get_html_and_errors(
        request, doc, rendering_params)
    rendering_params["render_raw_fallback"] = render_raw_fallback

    # Start parsing and applying filters.
    if doc.show_toc and not rendering_params["raw"]:
        toc_html = doc.get_toc_html()
    else:
        toc_html = None
    doc_html = _filter_doc_html(request, doc, doc_html, rendering_params)

    if rendering_params["raw"]:
        response = _document_raw(doc_html)
    else:
        # Get the SEO summary
        seo_summary = doc.get_summary_text()

        # Get the additional title information, if necessary.
        seo_parent_title = _get_seo_parent_title(original_doc, slug_dict,
                                                 document_locale)

        # Retrieve pre-parsed content hunks
        quick_links_html = doc.get_quick_links_html()
        body_html = doc.get_body_html()

        # Record the English slug in Google Analytics,
        # to associate translations
        if original_doc.locale == "en-US":
            en_slug = original_doc.slug
        elif original_doc.parent_id and original_doc.parent.locale == "en-US":
            en_slug = original_doc.parent.slug
        else:
            en_slug = ""

        share_text = gettext("I learned about %(title)s on MDN.") % {
            "title": doc.title
        }
        contributors = doc.contributors
        contributors_count = len(contributors)
        has_contributors = contributors_count > 0
        other_translations = original_doc.get_other_translations(
            fields=["title", "locale", "slug", "parent"])
        # Every locale this document exists in (original + translations).
        all_locales = {original_doc.locale} | set(
            trans.locale for trans in other_translations)

        # Bundle it all up and, finally, return.
        context = {
            "document": original_doc,
            "document_html": doc_html,
            "toc_html": toc_html,
            "quick_links_html": quick_links_html,
            "body_html": body_html,
            "contributors": contributors,
            "contributors_count": contributors_count,
            "contributors_limit": 6,
            "has_contributors": has_contributors,
            "fallback_reason": fallback_reason,
            "kumascript_errors": ks_errors,
            "macro_sources": (kumascript.macro_sources(
                force_lowercase_keys=True) if ks_errors else None),
            "render_raw_fallback": rendering_params["render_raw_fallback"],
            "seo_summary": seo_summary,
            "seo_parent_title": seo_parent_title,
            "share_text": share_text,
            "search_url": get_search_url_from_referer(request) or "",
            "analytics_page_revision": doc.current_revision_id,
            "analytics_en_slug": en_slug,
            "content_experiment": rendering_params["experiment"],
            "other_translations": other_translations,
            "all_locales": all_locales,
        }
        response = render(request, "wiki/document.html", context)

    # Responses with macro errors or for logged-in users must not be cached.
    if ks_errors or request.user.is_authenticated:
        add_never_cache_headers(response)

    # We're doing this to prevent any unknown intermediate public HTTP caches
    # from erroneously caching without considering cookies, since cookies do
    # affect the content of the response. The primary CDN is configured to
    # cache based on a whitelist of cookies.
    patch_vary_headers(response, ("Cookie", ))
    return _add_kuma_revision_header(doc, response)
def callback(response):
    """Stamp never-cache headers on *response* and hand it back unchanged."""
    add_never_cache_headers(response)
    return response
def process_response(self, request, response):
    """Middleware hook: disable caching unless the response already
    carries an explicit Cache-Control header of its own."""
    already_controlled = 'Cache-Control' in response
    if not already_controlled:
        add_never_cache_headers(response)
    return response
def process_response(self, request, response):
    """Middleware hook: best-effort disable caching on every response.

    Header-setting failures are deliberately swallowed so that a
    malformed response object never breaks the response cycle.
    """
    try:
        add_never_cache_headers(response)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer silently swallowed.
        pass
    return response
for i in tenantsData: i[columnNameIndexMap['sex']] = dict(SEX_CHOICES)[i[columnNameIndexMap['sex']]] response_dict = {} response_dict.update({'aaData':tenantsData}) response_dict.update({ 'sEcho': sEcho, 'iTotalRecords': iTotalRecords, 'iTotalDisplayRecords':iTotalDisplayRecords, 'sColumns':sColumns}) response = HttpResponse(json.json.dumps(response_dict)) #阻止缓存 add_never_cache_headers(response) return response @login_required def addtenant(request): username = request.user.username userId = request.user.id if request.method == "POST": form = TenantForm(data = request.POST,userId = userId ) if form.is_valid(): form.save() success = True successinfo = "添加" return render_to_response('tenant/tenant.html',{ "title":'租客管理',
def get_datatables_records(request, querySet, columnIndexNameMap, jsonTemplatePath=None, *args):
    """
    Generic server-side processing endpoint for DataTables (1.x protocol).

    Usage:
        querySet: query set to draw data from.
        columnIndexNameMap: field names in order to be displayed.
        jsonTemplatePath: optional template file to generate custom json from.
                          If not provided it will generate the data directly
                          from the model.

    NOTE(review): this is Python 2 code — ``dict.keys()`` is assumed to be a
    sortable list, ``HttpResponse(mimetype=...)`` and the bytes/str
    comparison on ``customSearch`` would all break on Python 3.
    """
    cols = int(request.GET.get('iColumns', 0))  # Get the number of columns
    # Safety measure. If someone messes with iDisplayLength manually,
    # we clip it to the max value of 100.
    iDisplayLength = min(int(request.GET.get('iDisplayLength', 10)), 100)
    # Where the data starts from (page)
    startRecord = int(request.GET.get('iDisplayStart', 0))
    # where the data ends (end of page)
    endRecord = startRecord + iDisplayLength

    # Pass sColumns: display fields joined in column-index order.
    keys = columnIndexNameMap.keys()
    keys.sort()
    colitems = [columnIndexNameMap[key] for key in keys]
    sColumns = ",".join(map(str, colitems))

    # Ordering data: build an order_by() list from iSortCol_N / sSortDir_N.
    iSortingCols = int(request.GET.get('iSortingCols', 0))
    asortingCols = []
    if iSortingCols:
        for sortedColIndex in range(0, iSortingCols):
            sortedColID = int(
                request.GET.get('iSortCol_' + str(sortedColIndex), 0))
            # make sure the column is sortable first
            if request.GET.get('bSortable_{0}'.format(sortedColID),
                               'false') == 'true':
                sortedColName = columnIndexNameMap[sortedColID]
                sortingDirection = request.GET.get(
                    'sSortDir_' + str(sortedColIndex), 'asc')
                if sortingDirection == 'desc':
                    sortedColName = '-' + sortedColName
                asortingCols.append(sortedColName)
        querySet = querySet.order_by(*asortingCols)

    # Determine which columns are searchable
    searchableColumns = []
    for col in range(0, cols):
        if request.GET.get('bSearchable_{0}'.format(col), False) == 'true':
            searchableColumns.append(columnIndexNameMap[col])

    # Apply filtering by value sent by user: OR together an __icontains
    # lookup per searchable column for the global search box.
    customSearch = request.GET.get('sSearch', '').encode('utf-8')
    if customSearch != '':
        outputQ = None
        first = True  # NOTE(review): never used after this point
        for searchableColumn in searchableColumns:
            kwargz = {searchableColumn + "__icontains": customSearch}
            outputQ = outputQ | Q(**kwargz) if outputQ else Q(**kwargz)
        querySet = querySet.filter(outputQ)

    # Individual column search: AND together per-column sSearch_N filters.
    outputQ = None
    for col in range(0, cols):
        if request.GET.get('sSearch_{0}'.format(col),
                           False) > '' and request.GET.get(
                               'bSearchable_{0}'.format(col),
                               False) == 'true':
            kwargz = {
                columnIndexNameMap[col] + "__icontains":
                request.GET['sSearch_{0}'.format(col)]
            }
            outputQ = outputQ & Q(**kwargz) if outputQ else Q(**kwargz)
    if outputQ:
        querySet = querySet.filter(outputQ)

    # count how many records match the final criteria
    iTotalRecords = iTotalDisplayRecords = querySet.count()
    # get the slice for the current page
    querySet = querySet[startRecord:endRecord]
    sEcho = int(request.GET.get('sEcho', 0))  # required echo response
    if jsonTemplatePath:
        # prepare the JSON with the response, consider using :
        # from django.template.defaultfilters import escapejs
        jstonString = render_to_string(jsonTemplatePath, locals())
        response = HttpResponse(jstonString,
                                mimetype="application/javascript")
    else:
        # Build aaData rows by matching each display column name against the
        # keys of the values() dicts (preserves colitems order).
        aaData = []
        a = querySet.values()
        for row in a:
            rowkeys = row.keys()
            rowvalues = row.values()
            rowlist = []
            for col in range(0, len(colitems)):
                for idx, val in enumerate(rowkeys):
                    if val == colitems[col]:
                        rowlist.append(str(rowvalues[idx]))
            aaData.append(rowlist)
        response_dict = {}
        response_dict.update({'aaData': aaData})
        response_dict.update({
            'sEcho': sEcho,
            'iTotalRecords': iTotalRecords,
            'iTotalDisplayRecords': iTotalDisplayRecords,
            'sColumns': sColumns
        })
        response = HttpResponse(simplejson.dumps(response_dict),
                                mimetype='application/javascript')
    # prevent from caching datatables result
    add_never_cache_headers(response)
    return response
def paper_detail(request, arxiv_id):
    """Show an arXiv paper, creating and/or rendering it on first hit.

    Falls through three levels: latest succeeded render, latest running
    render, then kick off a fresh render.
    """
    arxiv_id, version = remove_version_from_arxiv_id(arxiv_id)
    if version is not None:
        # Canonicalise versioned IDs to the unversioned URL.
        return redirect("paper_detail", arxiv_id=arxiv_id)
    # Get the requested paper
    try:
        paper = Paper.objects.get(arxiv_id=arxiv_id)
    # If it doesn't exist, render it!
    except Paper.DoesNotExist:
        # update_or_create to avoid the race condition where several people
        # hit a new paper at the same time
        try:
            paper, created = Paper.objects.update_or_create_from_arxiv_id(
                arxiv_id)
        except PaperNotFoundError:
            raise Http404(f"Paper '{arxiv_id}' not found on Arxiv")
        if created:
            # Freshly created paper: start rendering right away.
            try:
                paper.render()
            except PaperIsNotRenderableError:
                res = render(request,
                             "papers/paper_detail_not_renderable.html",
                             {"paper": paper},
                             status=404)
                return add_paper_cache_control(res)
    # First, try to get the latest succeeded paper -- this is always what
    # we'll want to render.
    try:
        r = paper.renders.succeeded().not_expired().latest()
    except Render.DoesNotExist:
        # See if there is a render running
        try:
            r = paper.renders.not_expired().latest()
        except Render.DoesNotExist:
            try:
                # Either rendering has not started or it has expired.
                r = paper.render()
            except PaperIsNotRenderableError:
                res = render(request,
                             "papers/paper_detail_not_renderable.html",
                             {"paper": paper},
                             status=404)
                return add_paper_cache_control(res)
        if r.state in (Render.STATE_UNSTARTED, Render.STATE_RUNNING):
            # Render still in flight: show the "rendering" page, uncached
            # so the client can poll for completion.
            res = render(request, "papers/paper_detail_rendering.html", {
                'paper': paper,
                'render': r,
            })
            add_never_cache_headers(res)
            return res
        # Fall back to error if there is no successful or running render
        res = render(request,
                     "papers/paper_detail_error.html", {"paper": paper},
                     status=500)
        return add_paper_cache_control(res)
    # Happy path: a succeeded render exists — serve its processed output.
    processed_render = r.get_processed_render()
    res = render(
        request, "papers/paper_detail.html", {
            'paper': paper,
            'render': r,
            'body': processed_render['body'],
            'scripts': processed_render['scripts'],
            'styles': processed_render['styles'],
        })
    return add_paper_cache_control(res)
def paper_detail(request, arxiv_id):
    """Show an arXiv paper, delegating render orchestration to the model.

    ``?render`` forces a re-render; ``?no-render`` forbids starting one.
    """
    force_render = "render" in request.GET
    no_render = "no-render" in request.GET

    arxiv_id, version = remove_version_from_arxiv_id(arxiv_id)
    if version is not None:
        # Canonicalise versioned IDs to the unversioned URL.
        return redirect("paper_detail", arxiv_id=arxiv_id)
    # Get the requested paper
    try:
        paper = Paper.objects.get(arxiv_id=arxiv_id)
    # If it doesn't exist, fetch from arXiv API
    except Paper.DoesNotExist:
        # update_or_create to avoid the race condition where several people
        # hit a new paper at the same time
        try:
            paper, _ = Paper.objects.update_or_create_from_arxiv_id(arxiv_id)
        except PaperNotFoundError:
            raise Http404(f"Paper '{arxiv_id}' not found on arXiv")
    try:
        render_to_display = paper.get_render_to_display_and_render_if_needed(
            force_render=force_render,
            no_render=no_render,
        )
    # This will only get raised when no_render is true
    except Render.DoesNotExist:
        raise Http404("No render found for this paper, and not rendering")
    except PaperIsNotRenderableError:
        res = render(
            request,
            "papers/paper_detail_not_renderable.html",
            {"paper": paper},
            status=404,
        )
        return add_paper_cache_control(res, request)
    except TooManyRendersRunningError:
        # Render capacity exhausted: 503 and never cache so retries work.
        res = render(
            request,
            "papers/paper_detail_too_many_renders.html",
            {"paper": paper},
            status=503,
        )
        add_never_cache_headers(res)
        return res

    # Switch response based on state
    if render_to_display.state == Render.STATE_RUNNING:
        # 503 + no caching so the client can poll until the render finishes.
        res = render(
            request,
            "papers/paper_detail_rendering.html",
            {
                "paper": paper,
                "render": render_to_display
            },
            status=503,
        )
        add_never_cache_headers(res)
        return res
    elif render_to_display.state == Render.STATE_FAILURE:
        res = render(request,
                     "papers/paper_detail_error.html", {"paper": paper},
                     status=500)
        return add_paper_cache_control(res, request)
    elif render_to_display.state == Render.STATE_SUCCESS:
        processed_render = render_to_display.get_processed_render()
        res = render(
            request,
            "papers/paper_detail.html",
            {
                "paper": paper,
                "render": render_to_display,
                "body": processed_render["body"],
                "links": processed_render["links"],
                "scripts": processed_render["scripts"],
                "styles": processed_render["styles"],
                "abstract": processed_render["abstract"],
                "first_image": processed_render["first_image"],
            },
        )
        return add_paper_cache_control(res, request)
    else:
        raise Exception(f"Unknown render state: {render_to_display.state}")
def go_assessments_list(request):
    """DataTables endpoint: the current student's assessment records whose
    assessment window contains today, with optional name/username search.

    NOTE(review): Python 2 code (``unicode``, ``simplejson``,
    ``HttpResponse(mimetype=...)``, bytes/str comparison on the search term).
    """
    today = datetime.date.today()
    # AND the two date bounds: begindate <= today <= enddate.
    kwargzs = [
        {
            "assessment__enddate__gte": today
        },
        {
            "assessment__begindate__lte": today
        },
    ]
    outputQ = None
    for kwargz in kwargzs:
        outputQ = outputQ & Q(**kwargz) if outputQ else Q(**kwargz)
    assessmentrecords = AssessmentRecord.objects.filter(
        ostudent=request.user.student).filter(outputQ).order_by(
            '-assessment__term', 'dstudent__user__username')
    customSearch = request.GET.get('sSearch', '').rstrip().encode('utf-8')
    if customSearch != '':
        # OR the search term against real name and username.
        kwargzs = [
            {
                "dstudent__realname__icontains": customSearch
            },
            {
                "dstudent__user__username__icontains": customSearch
            },
        ]
        outputQ = None
        for kwargz in kwargzs:
            outputQ = outputQ | Q(**kwargz) if outputQ else Q(**kwargz)
        assessmentrecords = assessmentrecords.filter(outputQ)
    cols = int(request.GET.get('iColumns', 0))  # number of columns (unused)
    # rows per page, clipped to 100
    iDisplayLength = min(int(request.GET.get('iDisplayLength', 10)), 100)
    # offset of the first row of this page (0-based)
    startRecord = int(request.GET.get('iDisplayStart', 0))
    endRecord = startRecord + iDisplayLength
    sEcho = int(request.GET.get('sEcho', 0))  # draw counter echoed back
    # total row count before paging
    iTotalRecords = iTotalDisplayRecords = assessmentrecords.count()
    assessmentrecords = assessmentrecords[startRecord:endRecord]
    # Note: unicode(dict(EVALUATE_CHOICES)[i.result]) must NOT be used here.
    aaData = [[
        unicode(i.assessment.term),
        unicode(i.dstudent.realname),
        unicode(i.dstudent.user.username),
        dict(SEX_CHOICES)[i.dstudent.sex],
        dict(EVALUATE_CHOICES)[i.result]
    ] for i in assessmentrecords]
    response_dict = {}
    response_dict.update({'aaData': aaData})
    response_dict.update({
        'sEcho': sEcho,
        'iTotalRecords': iTotalRecords,
        'iTotalDisplayRecords': iTotalDisplayRecords
    })
    response = HttpResponse(simplejson.dumps(response_dict),
                            mimetype='application/json')
    # Prevent caching of DataTables results.
    add_never_cache_headers(response)
    return response
def response(self, request, **context):
    """Render ``self.template`` with the given keyword context and return
    the response with caching disabled."""
    rendered = render_to_response(self.template,
                                  context,
                                  context_instance=RequestContext(request))
    add_never_cache_headers(rendered)
    return rendered
def paper_detail(request, arxiv_id):
    """Show an arXiv paper, rendering it if no live render exists."""
    arxiv_id, version = remove_version_from_arxiv_id(arxiv_id)
    if version is not None:
        # Canonicalise versioned IDs to the unversioned URL.
        return redirect("paper_detail", arxiv_id=arxiv_id)
    # Get the requested paper
    try:
        paper = Paper.objects.get(arxiv_id=arxiv_id)
    # If it doesn't exist, fetch from arXiv API
    except Paper.DoesNotExist:
        # update_or_create to avoid the race condition where several people
        # hit a new paper at the same time
        try:
            paper, _ = Paper.objects.update_or_create_from_arxiv_id(arxiv_id)
        except PaperNotFoundError:
            raise Http404(f"Paper '{arxiv_id}' not found on arXiv")
    # Get latest render that hasn't expired
    try:
        r = paper.renders.not_expired().latest()
    except Render.DoesNotExist:
        try:
            # If it has expired or hasn't been started yet, render it!
            r = paper.render()
        except PaperIsNotRenderableError:
            res = render(request,
                         "papers/paper_detail_not_renderable.html",
                         {"paper": paper},
                         status=404)
            return add_paper_cache_control(res)
    # Stuck for some reason, give it a boot
    # This normally happens if there is an exception raised in render()
    if r.state == Render.STATE_UNSTARTED:
        # This will put it into running state
        r = paper.render()
    # Switch response based on state
    if r.state == Render.STATE_RUNNING:
        # 503 + no caching so the client can poll until the render finishes.
        res = render(request,
                     "papers/paper_detail_rendering.html", {
                         'paper': paper,
                         'render': r,
                     },
                     status=503)
        add_never_cache_headers(res)
        return res
    elif r.state == Render.STATE_FAILURE:
        # Fall back to error if there is no successful or running render
        res = render(request,
                     "papers/paper_detail_error.html", {"paper": paper},
                     status=500)
        return add_paper_cache_control(res)
    elif r.state == Render.STATE_SUCCESS:
        processed_render = r.get_processed_render()
        res = render(
            request, "papers/paper_detail.html", {
                'paper': paper,
                'render': r,
                'body': processed_render['body'],
                'links': processed_render['links'],
                'scripts': processed_render['scripts'],
                'styles': processed_render['styles'],
                'abstract': processed_render['abstract'],
                'first_image': processed_render['first_image'],
            })
        return add_paper_cache_control(res)
    else:
        raise Exception(f"Unknown render state: {r.state}")
def _render_blog_post(request, oid, page=None, screenshot_mode=False):
    """Render one blog post page (with a paginated comment thread).

    ``page`` is the comment page (1-based); ``screenshot_mode`` hides
    buttons/ads for screenshot generation.
    """
    if oid.endswith("/"):
        oid = oid[:-1]
    try:
        post = BlogItem.objects.get(oid=oid)
    except BlogItem.DoesNotExist:
        # Retry case-insensitively before giving up.
        try:
            post = BlogItem.objects.get(oid__iexact=oid)
        except BlogItem.DoesNotExist:
            if oid == "add":
                return redirect(reverse("add_post"))
            raise http.Http404(oid)

    # If you try to view a blog post that is beyond 10 days in the
    # the future it should raise a 404 error.
    future = timezone.now() + datetime.timedelta(days=10)
    if post.pub_date > future:
        raise http.Http404("not published yet")

    if page is None:
        page = 1
    else:
        page = int(page)
        # Explicit /p1 URLs canonicalise to the plain post URL.
        if page == 1:
            return redirect("blog_post", oid)
    if page > settings.MAX_BLOGCOMMENT_PAGES:
        raise http.Http404("Gone too far")

    # Reasons for not being here
    if request.method == "HEAD":
        return http.HttpResponse("")
    elif request.method == "GET" and (request.GET.get("replypath")
                                      or request.GET.get("show-comments")):
        # Legacy query params: permanent-redirect to the clean path.
        return http.HttpResponsePermanentRedirect(request.path)

    # attach a field called `_absolute_url` which depends on the request
    base_url = get_base_url(request)
    post._absolute_url = base_url + reverse("blog_post", args=(post.oid, ))

    context = {"post": post, "screenshot_mode": screenshot_mode}
    # The /plog/blogitem-040601-1 post is special-cased throughout.
    if "/plog/blogitem-040601-1" not in request.path:
        try:
            context["previous_post"] = post.get_previous_by_pub_date()
        except BlogItem.DoesNotExist:
            context["previous_post"] = None
        try:
            context["next_post"] = post.get_next_by_pub_date(
                pub_date__lt=timezone.now())
        except BlogItem.DoesNotExist:
            context["next_post"] = None

    if post.screenshot_image:
        context["screenshot_image"] = thumbnail(post.screenshot_image,
                                                "1280x1000",
                                                quality=90).url
        if context["screenshot_image"].startswith("//"):
            # facebook is not going to like that
            context[
                "screenshot_image"] = "https:" + context["screenshot_image"]
    else:
        context["screenshot_image"] = None

    # Cheat a little and make the open graph image absolute if need be.
    if post.open_graph_image and "://" not in post.open_graph_image:
        post.open_graph_image = request.build_absolute_uri(
            post.open_graph_image)

    blogcomments = BlogComment.objects.filter(blogitem=post, approved=True)
    # Restrict loaded fields — only these are needed by the template.
    only = (
        "oid",
        "blogitem_id",
        "parent_id",
        "approved",
        "comment_rendered",
        "add_date",
        "name",
    )
    root_comments = (blogcomments.filter(
        parent__isnull=True).order_by("add_date").only(*only))
    replies = blogcomments.filter(
        parent__isnull=False).order_by("add_date").only(*only)

    count_comments = blogcomments.count()
    root_comments_count = root_comments.count()

    if page > 1:
        if (page - 1) * settings.MAX_RECENT_COMMENTS > root_comments_count:
            raise http.Http404("Gone too far")

    # Slice the root comments down to the current page.
    slice_m, slice_n = get_blogcomment_slice(root_comments_count, page)
    root_comments = root_comments[slice_m:slice_n]

    comments_truncated = False
    if root_comments_count > settings.MAX_RECENT_COMMENTS:
        comments_truncated = settings.MAX_RECENT_COMMENTS

    # Bucket every comment by its parent id so the template can walk the
    # thread; root comments land under key None.
    all_comments = defaultdict(list)
    for comment in root_comments:
        all_comments[comment.parent_id].append(comment)
    for comment in replies:
        all_comments[comment.parent_id].append(comment)
    context["comments_truncated"] = comments_truncated
    context["count_comments"] = count_comments
    context["all_comments"] = all_comments
    if "/plog/blogitem-040601-1" not in request.path:
        context["related_by_keyword"] = get_related_posts_by_keyword(post,
                                                                     limit=5)
    context["show_buttons"] = not screenshot_mode
    context["show_carbon_ad"] = not screenshot_mode
    # context["show_carbon_ad"] = 0
    # context["show_carbon_native_ad"] = context["show_carbon_ad"]
    # Disabled as of Aug 2019 because the $$$ profit was too small and not
    # worth the web perf "drag" that it costs.
    context["show_carbon_native_ad"] = False
    context["home_url"] = request.build_absolute_uri("/")
    context["page_title"] = post.title
    context["pub_date_years"] = THIS_YEAR - post.pub_date.year
    context["page"] = page
    if page < settings.MAX_BLOGCOMMENT_PAGES:
        # But is there even a next page?!
        if page * settings.MAX_RECENT_COMMENTS < root_comments_count:
            context["paginate_uri_next"] = reverse("blog_post",
                                                   args=(post.oid, page + 1))
    if page > 1:
        context["paginate_uri_previous"] = reverse("blog_post",
                                                   args=(post.oid, page - 1))

    # The `post.open_graph_image` is a string. It looks something like this:
    # '/cache/1e/a7/1ea7b1a42e9161.png' and it would get rendered
    # into the template like this:
    # <meta property="og:image" content="/cache/1e/a7/1ea7b1a42e9161.png">
    # But post-processing will make this an absolute URL. And that might
    # not pick up the smarts that `get_base_url(request)` can do so
    # turn this into a control template context variable.
    absolute_open_graph_image = None
    if post.open_graph_image:
        absolute_open_graph_image = base_url + urlparse(
            post.open_graph_image).path
    context["absolute_open_graph_image"] = absolute_open_graph_image

    context["not_published_yet"] = post.pub_date > timezone.now()

    response = render(request, "plog/post.html", context)
    response["x-server"] = "django"
    # If it hasn't been published yet, don't cache-control it.
    if context["not_published_yet"]:
        add_never_cache_headers(response)
    return response
def _wrapped_view_func(request, *args, **kwargs):
    """Invoke the decorated view, then mark its response never-cacheable."""
    resp = view_func(request, *args, **kwargs)
    add_never_cache_headers(resp)
    return resp
def process_response(self, request, response):
    """Middleware hook: responses for authenticated users must never be
    cached; anonymous responses pass through untouched."""
    has_user = hasattr(request, 'user')
    if has_user and request.user.is_authenticated():
        add_never_cache_headers(response)
    return response
def get_profit_list(request):
    """DataTables endpoint: profit rows via raw SQL, paged in Python.

    NOTE(review): Python 2 code (``simplejson``, ``mimetype=``, bytes
    search term). The whole table is fetched and sliced in memory.
    """
    sql = "select profit_id, sell_id, stock_id, quantity, unitprice, profit, profitdate from profit "
    customSearch = request.GET.get('sSearch', '').rstrip().encode('utf-8');
    # NOTE(review): the disabled filter below concatenates user input into
    # SQL (injection risk) — parameterize before ever re-enabling it.
    #if customSearch != '':
    #   rawsql = rawsql + " where address like '%" + customSearch + "%'"
    # NOTE(review): the direction mapping looks inverted — 'desc' emits an
    # ascending ORDER BY and vice versa. Confirm whether this is intended.
    if int(request.GET.get("iSortCol_0")) == 0:
        if request.GET.get("sSortDir_0") == 'desc':
            sql = sql + " order by profit_id "
        else:
            sql = sql + " order by profit_id desc"
    if int(request.GET.get("iSortCol_0")) == 5:
        if request.GET.get("sSortDir_0") == 'desc':
            sql = sql + " order by profit "
        else:
            sql = sql + " order by profit desc"
    cursor = connection.cursor()
    cursor.execute(sql)
    fetchall = cursor.fetchall()
    rowcount = 0;
    aaData = []
    # Copy every DB row into a plain list and count the rows.
    for obj in fetchall:
        dic = []
        for i in obj:
            dic.append(i)
        aaData.append(dic)
        rowcount = rowcount + 1
    cols = int(request.GET.get('iColumns', 0))  # number of columns (unused)
    # rows per page, clipped to 100
    iDisplayLength = min(int(request.GET.get('iDisplayLength', 10)), 100)
    # offset of the first row of this page (0-based)
    startRecord = int(request.GET.get('iDisplayStart', 0))
    endRecord = startRecord + iDisplayLength
    sEcho = int(request.GET.get('sEcho', 0))  # draw counter echoed back
    #iTotalRecords = iTotalDisplayRecords = grades.count()
    # total row count before paging
    iTotalRecords = iTotalDisplayRecords = rowcount
    aaData = aaData[startRecord:endRecord]
    #grades = grades[startRecord:endRecord]
    #aaData = [[unicode(i.term),unicode(i.student.realname),unicode(i.student.user.username),unicode(i.student.theclass.classid),unicode(str(i.score)),] for i in grades]
    response_dict = {}
    response_dict.update({'aaData': aaData})
    response_dict.update({
        'sEcho': sEcho,
        'iTotalRecords': iTotalRecords,
        'iTotalDisplayRecords': iTotalDisplayRecords
    })
    response = HttpResponse(simplejson.dumps(response_dict),
                            mimetype='application/json')
    # Prevent caching of DataTables results.
    add_never_cache_headers(response)
    return response
def process_request(self, request):
    """Maintenance-mode middleware: short-circuit every request with the
    maintenance page (or a redirect) unless an ignore rule matches.

    Returns None to let the request through.
    """
    if settings.MAINTENANCE_MODE or core.get_maintenance_mode():
        try:
            # Always allow the "turn maintenance off" URL itself.
            url_off = reverse('maintenance_mode_off')
            resolve(url_off)
            if url_off == request.path_info:
                return None
        except NoReverseMatch:
            # maintenance_mode.urls not added
            pass
        if hasattr(request, 'user'):
            if settings.MAINTENANCE_MODE_IGNORE_STAFF and request.user.is_staff:
                return None
            if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER and request.user.is_superuser:
                return None
        if settings.MAINTENANCE_MODE_IGNORE_TEST:
            # Detect the common test runners from argv:
            # python runtests.py | python manage.py test |
            # python setup.py test | django-admin.py test
            is_testing = False
            if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) or (
                    len(sys.argv) > 1 and sys.argv[1] == 'test'):
                is_testing = True
            if is_testing:
                return None
        if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
            # Each entry is a regex matched against REMOTE_ADDR.
            for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
                ip_address_re = re.compile(ip_address)
                if ip_address_re.match(request.META['REMOTE_ADDR']):
                    return None
        if settings.MAINTENANCE_MODE_IGNORE_URLS:
            # Each entry is a regex matched against the request path.
            for url in settings.MAINTENANCE_MODE_IGNORE_URLS:
                url_re = re.compile(url)
                if url_re.match(request.path_info):
                    return None
        if settings.MAINTENANCE_MODE_REDIRECT_URL:
            # Avoid a redirect loop when already on the redirect target.
            redirect_url_re = re.compile(
                settings.MAINTENANCE_MODE_REDIRECT_URL)
            if redirect_url_re.match(request.path_info):
                return None
            return HttpResponseRedirect(
                settings.MAINTENANCE_MODE_REDIRECT_URL)
        else:
            request_context = {}
            if settings.MAINTENANCE_MODE_TEMPLATE_CONTEXT:
                # Optional dotted-path hook providing extra template context.
                request_context_func = utils.import_function(
                    settings.MAINTENANCE_MODE_TEMPLATE_CONTEXT)
                if request_context_func:
                    request_context = request_context_func(request=request)
            if django.VERSION < (1, 8):
                response = render_to_response(
                    settings.MAINTENANCE_MODE_TEMPLATE,
                    request_context,
                    context_instance=RequestContext(request),
                    content_type='text/html')
            else:
                response = render(request,
                                  settings.MAINTENANCE_MODE_TEMPLATE,
                                  context=request_context,
                                  content_type='text/html',
                                  status=503)
            # The maintenance page must never be cached.
            add_never_cache_headers(response)
            return response
    else:
        return None
def renderView(request):
    """Graphite /render endpoint: evaluate targets and return a graph
    image or a data serialization, with request- and data-level caching.
    """
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    # TODO: Make that a namedtuple or a class.
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'now': requestOptions['now'],
        'localOnly': requestOptions['localOnly'],
        'template': requestOptions['template'],
        'tzinfo': requestOptions['tzinfo'],
        'forwardHeaders': requestOptions['forwardHeaders'],
        'data': [],
        'prefetched': {},
        'xFilesFactor': requestOptions['xFilesFactor'],
    }
    data = requestContext['data']
    response = None

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        response = cache.get(requestKey)
        if response:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' %
                          (time() - start))
            return response
        log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                # Literal "name:value" slice.
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                # NOTE(review): bare except — also hides SystemExit etc.
                except:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                # Evaluate the target and reduce each series to one value.
                seriesList = evaluateTarget(requestContext, target)
                for series in seriesList:
                    func = PieFunction(requestOptions['pieMode'])
                    data.append((series.name, func(requestContext, series)
                                 or 0))
    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        cachedData = None
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime,
                               requestOptions['xFilesFactor'])
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            targets = requestOptions['targets']
            data.extend(evaluateTarget(requestContext, targets))
            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        renderStart = time()

        format = requestOptions.get('format')
        if format == 'csv':
            response = renderViewCsv(requestOptions, data)
        elif format == 'json':
            response = renderViewJson(requestOptions, data)
        elif format == 'dygraph':
            response = renderViewDygraph(requestOptions, data)
        elif format == 'rickshaw':
            response = renderViewRickshaw(requestOptions, data)
        elif format == 'raw':
            response = renderViewRaw(requestOptions, data)
        elif format == 'pickle':
            response = renderViewPickle(requestOptions, data)
        elif format == 'msgpack':
            response = renderViewMsgPack(requestOptions, data)

    # if response wasn't generated above, render a graph image
    if not response:
        format = 'image'
        renderStart = time()
        response = renderViewGraph(graphOptions, requestOptions, data)

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('%s rendering time %6f' % (format, time() - renderStart))
    log.rendering('Total request processing time %6f' % (time() - start))
    return response
def process_response(self, request, response):
    """Middleware hook: unconditionally disable caching on every response."""
    add_never_cache_headers(response)
    return response
def process_request(self, request):
    """Maintenance-mode middleware hook.

    When maintenance mode is active (via settings.MAINTENANCE_MODE or the
    runtime flag from ``core.get_maintenance_mode()``), returns either a
    redirect or a rendered maintenance page for every request, unless one of
    the configured exemptions matches.  Returns None to let the request
    proceed normally.
    """
    if settings.MAINTENANCE_MODE or core.get_maintenance_mode():
        # Always allow the "turn maintenance mode off" URL itself, so an
        # operator is never locked out.
        try:
            url_off = reverse('maintenance_mode_off')
            resolve(url_off)
            if url_off == request.path_info:
                return None
        except NoReverseMatch:
            # maintenance_mode.urls not added
            pass

        # Per-user exemptions.  NOTE(review): is_anonymous()/is_authenticated()
        # are called — this targets Django < 1.10 where they were methods.
        if hasattr(request, 'user'):
            if settings.MAINTENANCE_MODE_IGNORE_STAFF and request.user.is_staff:
                return None
            if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER and request.user.is_superuser:
                return None
            if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS and request.user.is_anonymous(
            ):
                return None
            # Optionally force logged-in users out while in maintenance.
            if settings.MAINTENANCE_MODE_EXPIRE_SESSION and request.user.is_authenticated(
            ):
                logout(request)

        # Skip maintenance handling while the test suite is running.
        if settings.MAINTENANCE_MODE_IGNORE_TESTS:
            is_testing = False
            if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) or (
                    len(sys.argv) > 1 and sys.argv[1] == 'test'):
                # python runtests.py | python manage.py test | python setup.py test | django-admin.py test
                is_testing = True
            if is_testing:
                return None

        # IP-address allow-list: each entry is treated as a regex matched
        # against the client IP (resolved via a configurable function path).
        if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
            if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS:
                try:
                    get_client_ip_address_func = import_string(
                        settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS)
                except ImportError:
                    raise ImproperlyConfigured(
                        'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS is not a valid function path.'
                    )
                else:
                    client_ip_address = get_client_ip_address_func(request)
            else:
                client_ip_address = utils.get_client_ip_address(request)
            for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES:
                ip_address_re = re.compile(ip_address)
                if ip_address_re.match(client_ip_address):
                    return None

        # URL allow-list: regexes matched against the request path.
        if settings.MAINTENANCE_MODE_IGNORE_URLS:
            for url in settings.MAINTENANCE_MODE_IGNORE_URLS:
                url_re = re.compile(url)
                if url_re.match(request.path_info):
                    return None

        if settings.MAINTENANCE_MODE_REDIRECT_URL:
            # Don't redirect requests that already target the redirect URL
            # (would loop forever otherwise).
            redirect_url_re = re.compile(
                settings.MAINTENANCE_MODE_REDIRECT_URL)
            if redirect_url_re.match(request.path_info):
                return None
            return HttpResponseRedirect(
                settings.MAINTENANCE_MODE_REDIRECT_URL)
        else:
            # Render the maintenance template, optionally enriching the
            # context via a configurable function path.
            request_context = {}
            if settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT:
                try:
                    get_request_context_func = import_string(
                        settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT)
                except ImportError:
                    raise ImproperlyConfigured(
                        'settings.MAINTENANCE_MODE_GET_TEMPLATE_CONTEXT is not a valid function path.'
                    )
                else:
                    request_context = get_request_context_func(
                        request=request)
            if django.VERSION < (1, 8):
                # Legacy render_to_response signature (no status kwarg, so
                # pre-1.8 the maintenance page is served with status 200).
                response = render_to_response(
                    settings.MAINTENANCE_MODE_TEMPLATE,
                    request_context,
                    context_instance=RequestContext(request),
                    content_type='text/html')
            else:
                response = render(request,
                                  settings.MAINTENANCE_MODE_TEMPLATE,
                                  context=request_context,
                                  content_type='text/html',
                                  status=503)
            add_never_cache_headers(response)
            return response
    else:
        # Maintenance mode off: request passes through untouched.
        return None
def __call__(self, request): response = self.get_response(request) add_never_cache_headers(response) return response
def process_response(request, response): if 'vary' in response and 'accept-language' in response['vary'].lower( ): add_never_cache_headers(response) return response
def get(self, request, *args, **kwargs): response = super().get(request, *args, **kwargs) add_never_cache_headers(response) return response
def data(request, **kwargs):
    """AJAX endpoint that replaces the current user's building classifications.

    Deletes the user's existing ClassificationHead rows, then recreates
    heads and rows from the JSON request body, validating that each
    urban/rural frequency is in [0, 1] and that quantitative frequencies sum
    to 1.0 per classification.  Always replies with a JSON payload
    ``{'ret': 0|1, 'ret_s': message}``.

    NOTE(review): assumes request.body is JSON of the shape
    {'classifications': [{'country', 'freq_type', 'occupancies', 'notes',
    'build_classes': [{'path', 'urban', 'urban_quan', 'rural',
    'rural_quan'}]}]} — inferred from the keys read below; confirm against
    the client.
    """
    # request.is_ajax() if not exit
    # NOTE(review): an unauthenticated request falls through with a bare
    # ``return`` (i.e. the view returns None, which Django rejects) — the
    # print marks this as an unfinished TODO.
    if not request.user.is_authenticated():
        print("TODO: manage not auth")
        return
    dt = json.loads(request.body)
    try:
        # Full replace: drop everything this user previously saved.
        ClassificationHead.objects.filter(owner_id=request.user.pk).delete()
        for classification in dt['classifications']:
            # Running totals used to check quantitative frequencies sum to 1.
            urban_quan_tot = 0
            rural_quan_tot = 0
            freq_type = _freq_type2int(classification['freq_type'])
            head = ClassificationHead(owner_id=request.user.pk,
                                      country=classification['country'],
                                      freq_type=freq_type,
                                      occupancy=_occupancies_encode(
                                          classification['occupancies']),
                                      notes=classification['notes'],
                                      last_mod=timezone.now(),
                                      vers=dataset_version)
            head.save()
            for bc in classification['build_classes']:
                # Non-numeric frequencies are treated as 0 rather than errors.
                try:
                    urban_quan = float(bc['urban_quan'])
                except ValueError:
                    urban_quan = 0
                try:
                    rural_quan = float(bc['rural_quan'])
                except ValueError:
                    rural_quan = 0
                # Each frequency must be a proportion in [0, 1]; the error
                # strings contain HTML shown to the user by the client.
                if urban_quan < 0.0 or urban_quan > 1.0:
                    raise ValueError(
                        "%s<b>Error:</b>'urban' frequency out of range" %
                        (_errlog_longheader(classification['country'],
                                            classification['occupancies'],
                                            bc['path'])))
                if rural_quan < 0.0 or rural_quan > 1.0:
                    raise ValueError(
                        "%s<b>Error:</b>'rural' frequency out of range" %
                        (_errlog_longheader(classification['country'],
                                            classification['occupancies'],
                                            bc['path'])))
                urban_quan_tot += urban_quan
                rural_quan_tot += rural_quan
                row = ClassificationRow(owner_id=request.user.pk,
                                        head_id=head.pk,
                                        path=bc['path'],
                                        urban=_freq_qual_type2int(bc['urban']),
                                        urban_quan=urban_quan,
                                        rural=_freq_qual_type2int(bc['rural']),
                                        rural_quan=rural_quan,
                                        vers=dataset_version)
                row.save()
            # Quantitative classifications must have frequencies summing to
            # 1.0 (float-tolerant via is_close) for both urban and rural.
            if classification['freq_type'] == 'quantitative' and (
                    is_close(urban_quan_tot, 1.0) is False
                    or is_close(rural_quan_tot, 1.0) is False):
                raise ValueError(
                    "%s<b>Error:</b> '%s' frequencies sum is %3.3f instead of"
                    " 1.000" %
                    (_errlog_shortheader(classification['country'],
                                         classification['occupancies']),
                     ('urban' if urban_quan_tot != 1 else 'rural'),
                     (urban_quan_tot if urban_quan_tot != 1 else rural_quan_tot)))
        # Presumably commits the transaction in a Django-version-dependent
        # way — TODO confirm against its definition elsewhere in the project.
        django_version_transaction()
        resp = {'ret': 0, 'ret_s': 'success'}
    except Exception as e:
        # Any validation or DB failure rolls everything back and is reported
        # to the client in the JSON payload instead of a 500.
        transaction.rollback()
        resp = {'ret': 1, 'ret_s': str(e)}
    response = HttpResponse(json.dumps(resp, cls=DjangoJSONEncoder),
                            content_type='application/javascript')
    add_never_cache_headers(response)
    return response
def handler500(request, template_name="dpaste/500.html"): context = {} context.update(config.extra_template_context) response = render(request, template_name, context, status=500) add_never_cache_headers(response) return response
def renderView(request):
    """Legacy /render view.

    Parses options, fetches series data (consulting the request and data
    caches), and returns it in the requested format (csv/json/dygraph/
    rickshaw/raw/pickle) or renders a graph image (png/svg/pdf).
    """
    start = time()
    (graphOptions, requestOptions) = parseOptions(request)
    useCache = 'noCache' not in requestOptions
    cacheTimeout = requestOptions['cacheTimeout']
    requestContext = {
        'startTime': requestOptions['startTime'],
        'endTime': requestOptions['endTime'],
        'localOnly': requestOptions['localOnly'],
        'template': requestOptions['template'],
        'tzinfo': requestOptions['tzinfo'],
        'data': []
    }
    data = requestContext['data']

    # First we check the request cache
    if useCache:
        requestKey = hashRequest(request)
        cachedResponse = cache.get(requestKey)
        if cachedResponse:
            log.cache('Request-Cache hit [%s]' % requestKey)
            log.rendering('Returned cached response in %.6f' % (time() - start))
            return cachedResponse
        else:
            log.cache('Request-Cache miss [%s]' % requestKey)

    # Now we prepare the requested data
    if requestOptions['graphType'] == 'pie':
        for target in requestOptions['targets']:
            if target.find(':') >= 0:
                # "name:value" literal slices
                try:
                    name, value = target.split(':', 1)
                    value = float(value)
                # FIX: was a bare "except:", which also swallowed
                # SystemExit/KeyboardInterrupt; only ValueError can occur here.
                except ValueError:
                    raise ValueError("Invalid target '%s'" % target)
                data.append((name, value))
            else:
                seriesList = evaluateTarget(requestContext, target)
                for series in seriesList:
                    func = PieFunctions[requestOptions['pieMode']]
                    data.append((series.name, func(requestContext, series) or 0))

    elif requestOptions['graphType'] == 'line':
        # Let's see if at least our data is cached
        if useCache:
            targets = requestOptions['targets']
            startTime = requestOptions['startTime']
            endTime = requestOptions['endTime']
            dataKey = hashData(targets, startTime, endTime)
            cachedData = cache.get(dataKey)
            if cachedData:
                log.cache("Data-Cache hit [%s]" % dataKey)
            else:
                log.cache("Data-Cache miss [%s]" % dataKey)
        else:
            cachedData = None

        if cachedData is not None:
            requestContext['data'] = data = cachedData
        else:  # Have to actually retrieve the data now
            for target in requestOptions['targets']:
                if not target.strip():
                    continue
                t = time()
                seriesList = evaluateTarget(requestContext, target)
                log.rendering("Retrieval of %s took %.6f" % (target, time() - t))
                data.extend(seriesList)
            if useCache:
                cache.add(dataKey, data, cacheTimeout)

        # If data is all we needed, we're done
        format = requestOptions.get('format')
        if format == 'csv':
            response = HttpResponse(content_type='text/csv')
            writer = csv.writer(response, dialect='excel')
            for series in data:
                for i, value in enumerate(series):
                    timestamp = datetime.fromtimestamp(
                        series.start + (i * series.step),
                        requestOptions['tzinfo'])
                    writer.writerow(
                        (series.name, timestamp.strftime("%Y-%m-%d %H:%M:%S"),
                         value))
            return response

        if format == 'json':
            series_data = []
            if 'maxDataPoints' in requestOptions and any(data):
                # Consolidate each series so no series carries more than
                # maxDataPoints points over the common time range.
                startTime = min([series.start for series in data])
                endTime = max([series.end for series in data])
                timeRange = endTime - startTime
                maxDataPoints = requestOptions['maxDataPoints']
                for series in data:
                    numberOfDataPoints = timeRange / series.step
                    if maxDataPoints < numberOfDataPoints:
                        valuesPerPoint = math.ceil(
                            float(numberOfDataPoints) / float(maxDataPoints))
                        secondsPerPoint = int(valuesPerPoint * series.step)
                        # Nudge start over a little bit so that the consolidation bands align with each call
                        # removing 'jitter' seen when refreshing.
                        nudge = secondsPerPoint + (
                            series.start % series.step) - (series.start %
                                                           secondsPerPoint)
                        series.start = series.start + nudge
                        valuesToLose = int(nudge / series.step)
                        for r in range(1, valuesToLose):
                            del series[0]
                        series.consolidate(valuesPerPoint)
                        timestamps = range(int(series.start),
                                           int(series.end) + 1,
                                           int(secondsPerPoint))
                    else:
                        timestamps = range(int(series.start),
                                           int(series.end) + 1,
                                           int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name, datapoints=datapoints))
            elif 'noNullPoints' in requestOptions and any(data):
                # Emit only non-null values, each paired with its timestamp.
                for series in data:
                    values = []
                    for (index, v) in enumerate(series):
                        if v is not None:
                            timestamp = series.start + (index * series.step)
                            values.append((v, timestamp))
                    if len(values) > 0:
                        series_data.append(
                            dict(target=series.name, datapoints=values))
            else:
                for series in data:
                    timestamps = range(int(series.start),
                                       int(series.end) + 1, int(series.step))
                    datapoints = zip(series, timestamps)
                    series_data.append(
                        dict(target=series.name, datapoints=datapoints))

            if 'jsonp' in requestOptions:
                response = HttpResponse(
                    content="%s(%s)" % (requestOptions['jsonp'],
                                        json.dumps(series_data,
                                                   cls=FloatEncoder)),
                    content_type='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data,
                                                           cls=FloatEncoder),
                                        content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total json rendering time %.6f' % (time() - start))
            return response

        if format == 'dygraph':
            labels = ['Time']
            result = '{}'
            if data:
                # One row per timestamp of the first series; each series
                # contributes a column.  JSON has no Infinity/NaN, so they
                # are spelled out as bare tokens for dygraph's parser.
                datapoints = [[
                    ts
                ] for ts in range(data[0].start, data[0].end, data[0].step)]
                for series in data:
                    labels.append(series.name)
                    for i, point in enumerate(series):
                        if point is None:
                            point = 'null'
                        elif point == float('inf'):
                            point = 'Infinity'
                        elif point == float('-inf'):
                            point = '-Infinity'
                        elif math.isnan(point):
                            point = 'null'
                        datapoints[i].append(point)
                # %%s000: timestamps are seconds, dygraph wants milliseconds.
                line_template = '[%%s000%s]' % ''.join([', %s'] * len(data))
                lines = [
                    line_template % tuple(points) for points in datapoints
                ]
                result = '{"labels" : %s, "data" : [%s]}' % (
                    json.dumps(labels), ', '.join(lines))
            response = HttpResponse(content=result,
                                    content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total dygraph rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'rickshaw':
            series_data = []
            for series in data:
                timestamps = range(series.start, series.end, series.step)
                datapoints = [{
                    'x': x,
                    'y': y
                } for x, y in zip(timestamps, series)]
                series_data.append(
                    dict(target=series.name, datapoints=datapoints))
            if 'jsonp' in requestOptions:
                # FIX: was mimetype= (removed in Django 1.7); content_type=
                # is the equivalent, consistent with the json branch above.
                response = HttpResponse(
                    content="%s(%s)" % (requestOptions['jsonp'],
                                        json.dumps(series_data)),
                    content_type='text/javascript')
            else:
                response = HttpResponse(content=json.dumps(series_data),
                                        content_type='application/json')

            if useCache:
                cache.add(requestKey, response, cacheTimeout)
                patch_response_headers(response, cache_timeout=cacheTimeout)
            else:
                add_never_cache_headers(response)
            log.rendering('Total rickshaw rendering time %.6f' %
                          (time() - start))
            return response

        if format == 'raw':
            response = HttpResponse(content_type='text/plain')
            for series in data:
                response.write(
                    "%s,%d,%d,%d|" % (series.name, series.start, series.end,
                                      series.step))
                response.write(','.join(map(repr, series)))
                response.write('\n')
            log.rendering('Total rawData rendering time %.6f' %
                          (time() - start))
            return response

        # svg/pdf are rendered as images below with the right output format.
        if format == 'svg':
            graphOptions['outputFormat'] = 'svg'
        elif format == 'pdf':
            graphOptions['outputFormat'] = 'pdf'

        if format == 'pickle':
            response = HttpResponse(content_type='application/pickle')
            seriesInfo = [series.getInfo() for series in data]
            pickle.dump(seriesInfo, response, protocol=-1)
            log.rendering('Total pickle rendering time %.6f' %
                          (time() - start))
            return response

    # We've got the data, now to render it
    graphOptions['data'] = data
    if settings.REMOTE_RENDERING:
        # Rendering on other machines is faster in some situations
        image = delegateRendering(requestOptions['graphType'], graphOptions)
    else:
        image = doImageRender(requestOptions['graphClass'], graphOptions)

    useSVG = graphOptions.get('outputFormat') == 'svg'
    if useSVG and 'jsonp' in requestOptions:
        response = HttpResponse(content="%s(%s)" % (requestOptions['jsonp'],
                                                    json.dumps(image)),
                                content_type='text/javascript')
    elif graphOptions.get('outputFormat') == 'pdf':
        response = buildResponse(image, 'application/x-pdf')
    else:
        response = buildResponse(image,
                                 'image/svg+xml' if useSVG else 'image/png')

    if useCache:
        cache.add(requestKey, response, cacheTimeout)
        patch_response_headers(response, cache_timeout=cacheTimeout)
    else:
        add_never_cache_headers(response)

    log.rendering('Total rendering time %.6f seconds' % (time() - start))
    return response
def view_assessments_list(request): assessmentrows = AssessmentRow.objects.all().order_by( '-assessment__term', 'student__user__username') if not request.user.is_superuser: assessmentrows = assessmentrows.filter(student=request.user.student) customSearch = request.GET.get('sSearch', '').rstrip().encode('utf-8') if customSearch != '': kwargzs = [ { "assessment__term__icontains": customSearch }, { "student__realname__icontains": customSearch }, { "student__user__username__icontains": customSearch }, { "student__theclass__classid__icontains": customSearch }, ] outputQ = None for kwargz in kwargzs: outputQ = outputQ | Q(**kwargz) if outputQ else Q(**kwargz) assessmentrows = assessmentrows.filter(outputQ) cols = int(request.GET.get('iColumns', 0)) #获取有多少列数据 iDisplayLength = min(int(request.GET.get('iDisplayLength', 10)), 100) #每页获取rows个数 startRecord = int(request.GET.get('iDisplayStart', 0)) #本页第一条数据,是所有数据的第几个,从0开始 endRecord = startRecord + iDisplayLength sEcho = int(request.GET.get('sEcho', 0)) #页数 iTotalRecords = iTotalDisplayRecords = assessmentrows.count() #总共的rows数 assessmentrows = assessmentrows[startRecord:endRecord] aaData = [[ unicode(i.assessment.term), unicode(i.student.realname), unicode(i.student.user.username), unicode(i.student.theclass.classid), unicode(i.excellent), unicode(i.good), unicode(i.ordinary), ] for i in assessmentrows] response_dict = {} response_dict.update({'aaData': aaData}) response_dict.update({ 'sEcho': sEcho, 'iTotalRecords': iTotalRecords, 'iTotalDisplayRecords': iTotalDisplayRecords }) response = HttpResponse(simplejson.dumps(response_dict), mimetype='application/json') #阻止缓存 add_never_cache_headers(response) return response
def process_response(self, request, response): """Set no-cache policy to response.""" add_never_cache_headers(response) return response