Example #1
0
def get_data(request, report, fmt, conf=""):
    """Render *report* in format *fmt*, honouring options in *conf*.

    ``conf`` is a querystring; recognised keys:
      * ``async`` — log the request for asynchronous generation and return
        a message with the eventual download URL.
      * ``gzip``  — gzip the payload and serve it as ``<report>.<fmt>.gz``.

    Returns an HttpResponse with the report data, or a plain-text message
    when the report is missing or was queued asynchronously.
    """
    DataProvider.fmt = fmt
    Logger.Message("Executing: " + conf)

    # Parse the querystring-style conf into a plain dict.
    qd = QueryDict(conf)
    conf = qd.dict()

    if 'async' in conf:
        url = log_async(report, fmt, conf)
        # Fixed typo in the user-facing message ("donwload" -> "download").
        return HttpResponse("Request logged. Please download from: " + url)

    r = Reports()
    data = r.GetData(report, fmt, conf)

    if not data:
        return HttpResponse("Report does not exist")

    download = False
    if 'gzip' in conf:
        fn = report + "." + fmt + ".gz"
        data = compress_string(data)
        c_type, download = get_mime_type('gzip')
    else:
        fn = report + "." + fmt
        c_type, download = get_mime_type(fmt)

    response = HttpResponse(data, content_type=c_type)

    if download:
        response['Content-Disposition'] = 'attachment; filename="' + fn + '"'

    return response
Example #2
0
    def process_response(self, request, response):
        """Gzip-encode *response* when the client accepts it.

        Skips non-200 or tiny bodies, already-encoded responses, and
        JavaScript/PDF content for MSIE user agents.
        """
        # Tiny or non-OK bodies are not worth the CPU.
        if response.status_code != 200 or len(response.content) < 200:
            return response

        # Caches must key on Accept-Encoding from here on.
        patch_vary_headers(response, ('Accept-Encoding',))

        if response.has_header('Content-Encoding'):
            # Something upstream already encoded the body.
            return response

        # Old IE chokes on gzipped JavaScript and PDF payloads.
        user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
        if "msie" in user_agent:
            content_type = response.get('Content-Type', '').lower()
            if "javascript" in content_type or content_type == "application/pdf":
                return response

        accept_encoding = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(accept_encoding):
            return response

        response.content = compress_string(response.content)
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
Example #3
0
    def process_response(self, request, response):
        """Conditionally gzip the response body for clients that accept it."""
        # Skip non-OK and short bodies: compression overhead isn't worth it.
        if response.status_code != 200 or len(response.content) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        # Never double-encode.
        if response.has_header('Content-Encoding'):
            return response

        # MSIE mishandles gzipped non-text and gzipped javascript payloads.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            ct = response.get('Content-Type', '').lower()
            if not ct.startswith("text/") or "javascript" in ct:
                return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        compressed = compress_string(response.content)
        response.content = compressed
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(compressed))
        return response
Example #4
0
 def get_prep_value(self, value):
     """Gzip-compress and base64-encode *value* for database storage.

     Python 2 only: relies on the ``unicode`` builtin and the ``'base64'``
     str codec. ``None`` passes through untouched.
     """
     if value is not None:
         if isinstance(value, unicode):
             # Compression needs a byte string.
             value = value.encode('utf8')
         value = compress_string(value)
         # py2 'base64' codec; presumably line-wrapped with a trailing
         # newline, unlike base64.b64encode — TODO confirm before porting.
         value = value.encode('base64').decode('utf8')
     return value
Example #5
0
    def process_response(self, request, response):
        """Gzip responses of at least 200 bytes when beneficial and accepted."""
        if len(response.content) < 200:
            # Too small to be worth compressing.
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        if response.has_header('Content-Encoding'):
            return response  # already encoded upstream

        # MSIE: only gzip plain text content, and never javascript.
        ua = request.META.get('HTTP_USER_AGENT', '').lower()
        if "msie" in ua:
            ctype = response.get('Content-Type', '').lower()
            if not ctype.startswith("text/") or "javascript" in ctype:
                return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        gzipped = compress_string(response.content)
        if len(gzipped) >= len(response.content):
            # Compression didn't help; send the original.
            return response

        if response.has_header('ETag'):
            # Mark the entity tag as the gzipped variant.
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])

        response.content = gzipped
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(gzipped))
        return response
Example #6
0
def _process_response(request, response):
	"""Set default cache headers and gzip the body for willing clients.

	NOTE(review): Cache-Control is (re)set whenever *either* Expires or
	Cache-Control is missing — confirm that overwriting an existing
	Cache-Control when only Expires is absent is intended.
	"""
	from django.utils.text  import compress_string
	from django.utils.cache import patch_vary_headers
	import re
	from datetime import datetime, timedelta

	if not (response.has_header('Expires') and response.has_header('Cache-Control')):
		response['Cache-Control'] = 'public, must-revalidate'

	# Compressing non-OK or tiny responses isn't worth it.
	if response.status_code != 200 or len(response.content) < 200:
		return response

	patch_vary_headers(response, ('Accept-Encoding',))

	# Don't double-encode.
	if response.has_header('Content-Encoding'):
		return response

	# MSIE only tolerates gzipped plain text, and never javascript.
	if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
		content_type = response.get('Content-Type', '').lower()
		if not content_type.startswith("text/") or "javascript" in content_type:
			return response

	# Plain substring test on Accept-Encoding (no regex used here).
	if 'gzip' not in request.META.get('HTTP_ACCEPT_ENCODING', ''):
		return response

	response.content = compress_string(response.content)
	response['Content-Encoding'] = 'gzip'
	response['Content-Length'] = str(len(response.content))
	return response
Example #7
0
    def process_response(self, request, response):
        """Gzip the body when it helps; tags the ETag as the gzip variant."""
        original = response.content
        # Below ~200 bytes the gzip header overhead dominates.
        if len(original) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding', ))

        if response.has_header('Content-Encoding'):
            return response

        # MSIE requires text/* and chokes on gzipped javascript.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            declared = response.get('Content-Type', '').lower()
            if not declared.startswith("text/") or "javascript" in declared:
                return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        shrunken = compress_string(original)
        if len(shrunken) >= len(original):
            # Keep the uncompressed body when gzip doesn't win.
            return response

        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])

        response.content = shrunken
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(shrunken))
        return response
Example #8
0
        def inner(self, request, *args, **kwargs):
            """Call *func* and gzip the rendered response when it shrinks."""
            response = func(self, request, *args, **kwargs)

            # Debug monolith deployments (outside test environments) skip
            # compression entirely.
            if (config and config.is_debug_mode and config.is_monolith_service
                    and not config.is_testing_env):
                return response

            # response.content is only available once the response has been
            # rendered; render now (also required before pickling to cache).
            response = self.finalize_response(request, response, *args, **kwargs)
            response.render()

            gzipped = compress_string(response.content)
            if len(gzipped) >= len(response.content):
                # No gain — keep the uncompressed body.
                return response

            # Swap in the gzipped body and fix the headers to match.
            response.content = gzipped
            response["Content-Length"] = str(len(response.content))
            response["Content-Encoding"] = "gzip"

            return response
Example #9
0
File: gzip.py Project: lapbay/milan
	def process_response(self, request, response):
		"""Gzip-compress qualifying (non-streaming) responses."""
		body = response.content
		# Non-200 or tiny payloads are returned untouched.
		if response.status_code != 200 or len(body) < 200:
			return response

		patch_vary_headers(response, ('Accept-Encoding',))

		if response.has_header('Content-Encoding'):
			# Already encoded; leave as-is.
			return response

		# MSIE only tolerates gzipped plain text (and never javascript).
		if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
			content_type = response.get('Content-Type', '').lower()
			if not content_type.startswith("text/") or "javascript" in content_type:
				return response

		if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
			return response

		response.content = compress_string(response.content)
		response['Content-Encoding'] = 'gzip'
		response['Content-Length'] = str(len(response.content))
		return response
Example #10
0
    def process_response(self, request, response):
        """Gzip regular or streaming responses when the client accepts it."""
        # Short, non-streaming bodies aren't worth compressing.
        if not response.streaming and len(response.content) < 200:
            return response

        # Don't re-encode an already-encoded body.
        if response.has_header('Content-Encoding'):
            return response

        patch_vary_headers(response, ('Accept-Encoding', ))

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        if response.streaming:
            # The compressed size of a stream is unknown up front, so the
            # Content-Length header has to go.
            response.streaming_content = compress_sequence(
                response.streaming_content)
            del response['Content-Length']
        else:
            shrunk = compress_string(response.content)
            # Only swap in the gzipped body if it is actually smaller.
            if len(shrunk) >= len(response.content):
                return response
            response.content = shrunk
            response['Content-Length'] = str(len(shrunk))

        if response.has_header('ETag'):
            # Tag the entity as the gzip variant.
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'

        return response
Example #11
0
    def process_response(self, request, response):
        """Gzip handler tolerant of response objects lacking ``.streaming``."""
        # getattr guards against response classes without a streaming flag.
        if not getattr(response, 'streaming', None) and len(response.content) < 200:
            # Too short to bother.
            return response

        if response.has_header('Content-Encoding'):
            # Already encoded elsewhere.
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        accepts = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(accepts):
            return response

        if getattr(response, 'streaming', None):
            # Stream length is unknowable in advance; drop Content-Length.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            candidate = compress_string(response.content)
            if len(candidate) >= len(response.content):
                # Keep the original if compression doesn't shrink it.
                return response
            response.content = candidate
            response['Content-Length'] = str(len(candidate))

        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'

        return response
Example #12
0
    def process_response(self, request, response):
        """Gzip the body for willing clients (IE JS/PDF excluded)."""
        # Compressing non-200 or sub-200-byte bodies isn't worthwhile.
        if response.status_code != 200 or len(response.content) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding', ))

        if response.has_header('Content-Encoding'):
            return response  # body already encoded

        # Older IE cannot handle gzipped JavaScript or PDF responses.
        agent = request.META.get('HTTP_USER_AGENT', '').lower()
        if "msie" in agent:
            declared_type = response.get('Content-Type', '').lower()
            if "javascript" in declared_type or declared_type == "application/pdf":
                return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        response.content = compress_string(response.content)
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
Example #13
0
 def compress(self, text):
     """Return *text* gzip-compressed via Django's ``compress_string`` —
     the same helper used by Django's gzip decorator and middleware.
     """
     return compress_string(text)
Example #14
0
 def get_db_prep_save(self, value, connection):
     """Gzip-compress *value* before delegating to TextField's save prep.

     Python 2 code: ``unicode`` input is UTF-8 encoded first (with
     'replace' for unencodable characters). ``None`` passes through.
     """
     if value is not None:
         if isinstance(value, unicode):
             # compress_string needs bytes, not unicode.
             value = value.encode('utf-8', 'replace')
         value = compress_string(value)
     return models.TextField.get_db_prep_save(self,
                                              value,
                                              connection=connection)
Example #15
0
 def save_page_node(self, html):
     """POST the gzipped page *html* to the original-page server.

     Returns True on HTTP 200; implicitly returns None otherwise
     (falsy either way).
     """
     endpoint = "http://%s/original_page/%s" % (
         settings.ORIGINAL_PAGE_SERVER,
         self.feed.pk,
     )
     payload = {'original_page': compress_string(html)}
     response = requests.post(endpoint, files=payload)
     if response.status_code == 200:
         return True
Example #16
0
 def get_prep_value(self, value):
     """
     Compress the text data.

     Encodes ``str`` to UTF-8 bytes, gzip-compresses, then base64-encodes
     via the codec API. NOTE(review): ``.encode('base64')`` is the
     Python 2 codec spelling and raises on Python 3 (no such str/bytes
     codec) — confirm this module still targets Python 2.
     """
     if value is not None:
         if isinstance(value, str):
             value = value.encode('utf8')
         value = compress_string(value)
         value = value.encode('base64').decode('utf8')  # lint-amnesty, pylint: disable=invalid-str-codec
     return value
Example #17
0
 def get_prep_value(self, value):
     """
     Compress the text data.

     Encodes text (``six.text_type``) to UTF-8 bytes, gzip-compresses,
     then base64-encodes via the codec API. NOTE(review): the
     ``'base64'`` codec spelling only works on Python 2 even though
     ``six`` is used — confirm the supported interpreter versions.
     """
     if value is not None:
         if isinstance(value, six.text_type):
             value = value.encode('utf8')
         value = compress_string(value)
         value = value.encode('base64').decode('utf8')
     return value
Example #18
0
def compressResult(result):
    """Serialize *result* as JSON and gzip the body when that shrinks it."""
    response = JsonResponse(result, safe=False)

    gzipped = compress_string(response.content)
    if len(gzipped) >= len(response.content):
        # Not smaller — ship the plain JSON.
        return response
    response.content = gzipped
    response['Content-Length'] = str(len(gzipped))
    response['Content-Encoding'] = 'gzip'
    return response
Example #19
0
    def process_response(self, request, response):
        """Gzip every response whose client accepts it.

        Fix: also refresh Content-Length after swapping in the compressed
        body — a Content-Length set earlier in the pipeline would otherwise
        go stale and truncate the delivered response. This matches the
        other middleware implementations in this file.
        """
        patch_vary_headers(response, ('Accept-Encoding',))
        # Never re-encode an already-encoded body.
        if response.has_header('Content-Encoding'):
            return response

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        response.content = compress_string(response.content)
        response['Content-Encoding'] = 'gzip'
        # Keep the declared length in sync with the compressed body.
        response['Content-Length'] = str(len(response.content))
        return response
Example #20
0
File: Ajax.py Project: SwoJa/potato
def compressResult(result):
    """Wrap *result* in a JsonResponse, gzipping the payload if beneficial."""
    resp = JsonResponse(result, safe=False)

    body = compress_string(resp.content)
    if len(body) < len(resp.content):
        # Compression helped: swap the body in and update the headers.
        resp.content = body
        resp['Content-Length'] = str(len(resp.content))
        resp['Content-Encoding'] = 'gzip'
    return resp
Example #21
0
def get_available_checksums_from_remote(channel_id, peer_id):
    """Return the set of locally-known checksums available on a remote peer.

    The current implementation prioritizes minimising requests to the remote
    server. In order to achieve this, it caches based on the baseurl and the
    channel_id. Also, it POSTs the complete list of non-supplementary files
    to the remote endpoint, and thus can keep this representation cached
    regardless of how the availability on the local server has changed in
    the interim.

    Raises:
        LocationError: if no NetworkLocation with ``peer_id`` exists.

    Returns None when the remote request fails or its payload is unusable.
    """
    try:
        baseurl = NetworkLocation.objects.values_list(
            "base_url", flat=True).get(id=peer_id)
    except NetworkLocation.DoesNotExist:
        raise LocationError("Peer with id {} does not exist".format(peer_id))

    CACHE_KEY = "PEER_AVAILABLE_CHECKSUMS_{baseurl}_{channel_id}".format(
        baseurl=baseurl, channel_id=channel_id)
    if CACHE_KEY not in cache:

        channel_checksums = (LocalFile.objects.filter(
            files__contentnode__channel_id=channel_id,
            files__supplementary=False).values_list("id",
                                                    flat=True).distinct())

        response = requests.post(
            get_file_checksums_url(channel_id, baseurl),
            # str.encode() already produces bytes; the redundant bytes()
            # wrapper was dropped.
            data=compress_string(
                json.dumps(list(channel_checksums)).encode("utf-8")),
            headers={"content-type": "application/gzip"},
        )

        checksums = None

        # Do something if we got a successful return
        if response.status_code == 200:
            try:
                integer_mask = int(response.content)

                # Filter to avoid passing in bad checksums
                checksums = set(
                    compress(channel_checksums,
                             _generate_mask_from_integer(integer_mask)))
                cache.set(CACHE_KEY, checksums, 3600)
            except (ValueError, TypeError):
                # Bad JSON parsing will throw ValueError
                # If the result of the json.loads is not iterable, a TypeError will be thrown
                # If we end up here, just set checksums to None to allow us to cleanly continue
                pass
    else:
        checksums = cache.get(CACHE_KEY)
    return checksums
Example #22
0
    def process_response(self, request, response):
        """Gzip responses, recording the byte savings in X-Gzip-Delta.

        X-Gzip-Delta stays 0 unless a non-streaming body was considered for
        compression, in which case it holds original-minus-compressed size
        (possibly negative when compression would have grown the body).
        """
        response['X-Gzip-Delta'] = 0
        self.time = time.time()

        # Short non-streaming bodies aren't worth the attempt.
        if not response.streaming and len(response.content) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        if response.streaming:
            # Streamed output has no knowable compressed size up front.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            squeezed = compress_string(response.content)
            delta = len(response.content) - len(squeezed)
            # Expose the savings even when we end up not compressing.
            response['X-Gzip-Delta'] = delta
            if delta < 0:
                # Compression grew the body; keep the original.
                return response
            response.content = squeezed
            response['Content-Length'] = str(len(response.content))

        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'

        return response
Example #23
0
    def process_response(self, request, response):
        """Gzip the body unless it's javascript (IE bug) or already encoded."""
        patch_vary_headers(response, ('Accept-Encoding',))

        # IE mishandles gzipped javascript, so skip it — and never re-encode
        # a body that already carries a Content-Encoding.
        content_type = response.headers.get('Content-Type', '').lower()
        if response.has_header('Content-Encoding') or "javascript" in content_type:
            return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        response.content = compress_string(response.content)
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
    def process_response(self, request, response):
        """Compress the body with gzip for clients advertising support."""
        patch_vary_headers(response, ('Accept-Encoding', ))

        # Silly IE: gzipped javascript is unsafe, skip it; also bail if the
        # body already has a content-encoding applied.
        is_javascript = "javascript" in response.headers.get('Content-Type',
                                                             '').lower()
        if response.has_header('Content-Encoding') or is_javascript:
            return response

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        gz = compress_string(response.content)
        response.content = gz
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
Example #25
0
File: gzip.py Project: nbsky/django
    def process_response(self, request, response):
        """Gzip the body (or stream) when the client accepts gzip and
        compression is worthwhile. (Comments translated from the original
        Chinese annotations.)
        """
        # Too small to be worth compressing.
        if not response.streaming and len(response.content) < 200:
            return response

        # Already content-encoded — nothing to do.
        if response.has_header('Content-Encoding'):
            return response

        # Make caches key on Accept-Encoding: compressed and uncompressed
        # bodies are distinct entities for a caching proxy, which indexes
        # by URL plus the headers named in Vary.
        patch_vary_headers(response, ('Accept-Encoding',))

        # The client doesn't advertise gzip support — skip.
        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        if response.streaming:
            # A stream is compressed chunk by chunk; the total size is
            # unknown, so Content-Length must be removed or clients may
            # misread the response.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            # Only keep the compressed body if it actually got smaller.
            squeezed = compress_string(response.content)
            if len(squeezed) >= len(response.content):
                return response
            # Update the body and its declared length.
            response.content = squeezed
            response['Content-Length'] = str(len(squeezed))

        # The entity changed, so flag the ETag as the gzip variant rather
        # than recomputing it from scratch.
        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])

        # Advertise the applied encoding.
        response['Content-Encoding'] = 'gzip'

        return response
Example #26
0
        def inner(self, request, *args, **kwargs):
            """Gzip wrapper: render, compress, and fix headers if smaller."""
            resp = func(self, request, *args, **kwargs)

            # Rendering must happen before resp.content can be read (and
            # before the response is pickled for caching).
            resp = self.finalize_response(request, resp, *args, **kwargs)
            resp.render()

            body = compress_string(resp.content)
            if len(body) >= len(resp.content):
                return resp  # compression made it no smaller

            # Replace content with the gzipped variant; update headers.
            resp.content = body
            resp['Content-Length'] = str(len(resp.content))
            resp['Content-Encoding'] = 'gzip'

            return resp
Example #27
0
    def process_response(self, request, response):
        """Gzip responses; supports iterable (streaming-style) content."""
        # The response object can tell us whether content is a string or an
        # iterable; short string content is not worth compressing.
        if not response._base_content_is_iter and len(response.content) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        if response.has_header('Content-Encoding'):
            return response

        # MSIE can't cope with gzipped non-text or javascript content.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            mime = response.get('Content-Type', '').lower()
            if not mime.startswith("text/") or "javascript" in mime:
                return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        if response._base_content_is_iter:
            # Final length of iterable content is unknown: drop the header
            # and wrap the inner container in a streaming gzip iterator.
            del response['Content-Length']
            response.content = compress_sequence(response._container)
        else:
            smaller = compress_string(response.content)
            if len(smaller) >= len(response.content):
                return response  # compression didn't pay off
            response.content = smaller
            response['Content-Length'] = str(len(smaller))

        if response.has_header('ETag'):
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])

        response['Content-Encoding'] = 'gzip'

        return response
Example #28
0
 def save_page_s3(self, html):
     """Upload the gzipped page *html* to S3 and retire the Mongo copy."""
     key = Key(settings.S3_PAGES_BUCKET)
     key.key = self.feed.s3_pages_key
     key.set_metadata('Content-Encoding', 'gzip')
     key.set_metadata('Content-Type', 'text/html')
     key.set_metadata('Access-Control-Allow-Origin', '*')
     key.set_contents_from_string(compress_string(html))
     key.set_acl('public-read')

     # Remove any page data still stored in Mongo now that S3 has it.
     try:
         feed_page = MFeedPage.objects.get(feed_id=self.feed.pk)
         feed_page.delete()
         logging.debug('   ---> [%-30s] ~FYTransfering page data to S3...' % (self.feed))
     except MFeedPage.DoesNotExist:
         pass

     # Flag the feed as S3-backed if it wasn't already.
     if not self.feed.s3_page:
         self.feed.s3_page = True
         self.feed.save()

     return True
Example #29
0
def layer_data_export(request, dataset_id, layer_name):
    """Preview or download a layer export (OSC XML, optionally gzipped).

    Raises Http404 for unknown datasets or layers. On a valid POST, either
    stores a preview in the template context or returns the data as an
    attachment. On GET / invalid form the function falls through and
    returns None — presumably a decorator renders ``ctx``; TODO confirm.

    Fix: ``except osm.Error, e`` (comma form) is invalid syntax on
    Python 3; the ``as`` form below works on Python 2.6+ and 3.x alike.
    """
    if not dataset_id in DATASETS:
        raise Http404('Dataset %s not found' % dataset_id)

    layer = get_object_or_404(Layer, pk=layer_name)
    if not osm.has_layer(dataset_id, layer):
        raise Http404('Layer %s not available in dataset %s' % (layer_name, DATASETS[dataset_id]))

    ctx = {
        'dataset_id': dataset_id,
        'dataset_name': DATASETS[dataset_id],
        'layer': layer,
        'title': 'Export %s from %s' % (str(layer), DATASETS[dataset_id]),
    }
    if request.method == "POST":
        form = BoundsForm(request.REQUEST)
        if form.is_valid():
            try:
                data = osm.export(dataset_id, layer, form.cleaned_data['bounds'])
            except osm.Error as e:
                ctx['error'] = str(e)
            else:
                if 'preview' in request.REQUEST:
                    ctx['preview_content'] = data
                else:
                    # Download as an attachment, gzipping on request.
                    filename = "%s.osc" % layer_name
                    if 'download_gz' in request.REQUEST:
                        data = text.compress_string(data)
                        filename += ".gz"
                        content_type = 'application/x-gzip'
                    else:
                        content_type = 'text/xml'
                    response = HttpResponse(data, content_type=content_type)
                    response['Content-Disposition'] = 'attachment; filename=%s' % filename

                    # NOTE(review): an *empty* Content-Encoding is set here,
                    # apparently to stop clients transparently decompressing
                    # the .gz attachment — confirm intent.
                    if content_type == 'application/x-gzip':
                        response['Content-Encoding'] = ''

                    return response
Example #30
0
    def process_response(self, request, response):
        """Gzip-encode the response when the client supports it.

        (Comments translated from the original Chinese annotations.)
        """
        # Packets this small aren't worth compressing.
        if not response.streaming and len(response.content) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding', ))

        # Give up if some other layer already applied a content encoding.
        if response.has_header('Content-Encoding'):
            return response

        # MSIE mishandles gzip for various content types: allow text only,
        # and never javascript.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            declared = response.get('Content-Type', '').lower()
            if not declared.startswith("text/") or "javascript" in declared:
                return response

        if not re_accepts_gzip.search(request.META.get('HTTP_ACCEPT_ENCODING', '')):
            return response

        if response.streaming:
            # A stream's compressed size is unknown until streamed, so the
            # Content-Length header must be dropped.
            response.streaming_content = compress_sequence(
                response.streaming_content)
            del response['Content-Length']
        else:
            # Only worthwhile if the result is actually smaller.
            packed = compress_string(response.content)
            if len(packed) >= len(response.content):
                return response
            response.content = packed
            response['Content-Length'] = str(len(packed))

        if response.has_header('ETag'):
            # Mark the ETag as the gzip variant instead of recomputing it.
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'

        return response
Example #31
0
def test_api_safe(client, section_doc, section_case, if_none_match, method):
    """
    Test GET & HEAD on wiki.document_api endpoint.

    Parametrized over section cases, If-None-Match behaviour ('match',
    'mismatch', or absent) and HTTP method; checks status, caching
    headers, the ETag, and the (possibly gzipped) body.
    """
    section_id, exp_content = SECTION_CASE_TO_DETAILS[section_case]

    url = section_doc.get_absolute_url() + '$api'

    if section_id:
        url += '?section={}'.format(section_id)

    headers = dict(HTTP_ACCEPT_ENCODING='gzip')

    if if_none_match == 'match':
        # Prime one request to capture the live ETag, then replay it.
        response = getattr(client, method.lower())(url, **headers)
        assert 'etag' in response
        headers['HTTP_IF_NONE_MATCH'] = response['etag']
    elif if_none_match == 'mismatch':
        headers['HTTP_IF_NONE_MATCH'] = 'ABC'

    response = getattr(client, method.lower())(url, **headers)

    if if_none_match == 'match':
        # A matching validator must yield 304 with an empty body.
        exp_content = ''
        assert response.status_code == 304
    else:
        assert response.status_code == 200
        assert 'public' in response['Cache-Control']
        assert 's-maxage' in response['Cache-Control']
        assert 'etag' in response
        assert 'x-kuma-revision' in response
        assert 'last-modified' not in response
        assert '"{}"'.format(calculate_etag(exp_content)) in response['etag']
        assert (response['x-kuma-revision'] == str(
            section_doc.current_revision_id))

    if method == 'GET':
        # HEAD has no body; for GET, compare against the gzipped form
        # when the response was content-encoded.
        if response.get('content-encoding') == 'gzip':
            exp_content = compress_string(exp_content)
        assert response.content == exp_content
Example #32
0
    def process_response(self, request, response):
        """Gzip-encode *response* when the client accepts it.

        Skips tiny bodies, already-encoded responses, broken MSIE user
        agents, and clients without gzip in Accept-Encoding.
        """
        is_stream = response.streaming
        if not is_stream and len(response.content) < 200:
            # Tiny payloads aren't worth the compression overhead.
            return response

        patch_vary_headers(response, ("Accept-Encoding",))

        if response.has_header("Content-Encoding"):
            # Something upstream already encoded the body; leave it alone.
            return response

        user_agent = request.META.get("HTTP_USER_AGENT", "").lower()
        if "msie" in user_agent:
            # Old IE mishandles gzip for several content types.
            content_type = response.get("Content-Type", "").lower()
            if "javascript" in content_type or not content_type.startswith("text/"):
                return response

        accept = request.META.get("HTTP_ACCEPT_ENCODING", "")
        if re_accepts_gzip.search(accept) is None:
            return response

        if is_stream:
            # Compressed length is unknown until the stream is consumed.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response["Content-Length"]
        else:
            candidate = compress_string(response.content)
            # Keep the original body if compression didn't shrink it.
            if len(candidate) >= len(response.content):
                return response
            response.content = candidate
            response["Content-Length"] = str(len(response.content))

        if response.has_header("ETag"):
            response["ETag"] = re.sub('"$', ';gzip"', response["ETag"])
        response["Content-Encoding"] = "gzip"

        return response
Example #33
0
    def process_response(self, request, response):
        """Gzip-compress the response body when the client accepts gzip."""
        # It's not worth attempting to compress really short responses.
        # (Too-small payloads are returned as-is.)
        if not response.streaming and len(response.content) < 200:
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header('Content-Encoding'): # an encoding already exists; give up
            return response

        # MSIE have issues with gzipped response of various content types.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            ctype = response.get('Content-Type', '').lower()
            if not ctype.startswith("text/") or "javascript" in ctype:
                return response

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        if response.streaming:
            # Delete the `Content-Length` header for streaming content, because
            # we won't know the compressed size until we stream it.
            # (After compression the length will differ.)
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            # Return the compressed content only if it's actually shorter.
            compressed_content = compress_string(response.content)
            if len(compressed_content) >= len(response.content): # was compression worth it?
                return response
            response.content = compressed_content
            response['Content-Length'] = str(len(response.content))

        if response.has_header('ETag'):
            # Tag the ETag as the gzip variant.
            response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
        response['Content-Encoding'] = 'gzip'

        return response
Example #34
0
def test_api_safe(client, section_doc, section_case, if_none_match, method):
    """
    Test GET & HEAD on wiki.document_api endpoint.
    """
    section_id, exp_content = SECTION_CASE_TO_DETAILS[section_case]

    # Build the API URL, optionally scoped to one section.
    url = section_doc.get_absolute_url() + '$api'
    if section_id:
        url += '?section={}'.format(section_id)

    do_request = getattr(client, method.lower())
    headers = {'HTTP_ACCEPT_ENCODING': 'gzip'}

    if if_none_match == 'match':
        # Prime a request to learn the current ETag, then replay it.
        priming = do_request(url, **headers)
        assert 'etag' in priming
        headers['HTTP_IF_NONE_MATCH'] = priming['etag']
    elif if_none_match == 'mismatch':
        headers['HTTP_IF_NONE_MATCH'] = 'ABC'

    response = do_request(url, **headers)

    if if_none_match != 'match':
        assert response.status_code == 200
        assert 'etag' in response
        assert 'x-kuma-revision' in response
        assert 'last-modified' not in response
        assert '"{}"'.format(calculate_etag(exp_content)) in response['etag']
        assert (response['x-kuma-revision'] ==
                str(section_doc.current_revision_id))
    else:
        # Matching validator: 304 and an empty body.
        exp_content = ''
        assert response.status_code == 304

    if method == 'GET':
        if response.get('content-encoding') == 'gzip':
            exp_content = compress_string(exp_content)
        assert response.content == exp_content
Example #35
0
    def process_response(self, request, response):
        """Gzip the response body for clients that accept it.

        omeroweb-specific policy: only 200 responses of at least 8192
        bytes, and only opt-in content types (text or javascript).
        """
        # It's not worth compressing non-OK or really short responses.
        # omeroweb: the tradeoff for less than 8192k of uncompressed text is
        # not worth it most of the times.
        if response.status_code != 200 or len(response.content) < 8192:
            return response

        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header("Content-Encoding"):
            return response

        # omeroweb: we don't want to compress everything, so doing an opt-in
        # approach
        ctype = response.get("Content-Type", "").lower()
        if "javascript" not in ctype and "text" not in ctype:
            return response

        patch_vary_headers(response, ("Accept-Encoding", ))

        # Older versions of IE have issues with gzipped pages containing
        # either Javascript and PDF.  Reuse the lowercased ctype fetched
        # above rather than fetching the header a second time.
        if "msie" in request.META.get("HTTP_USER_AGENT", "").lower():
            if "javascript" in ctype or ctype == "application/pdf":
                return response

        ae = request.META.get("HTTP_ACCEPT_ENCODING", "")
        if not re_accepts_gzip.search(ae):
            return response

        response.content = compress_string(response.content)
        response["Content-Encoding"] = "gzip"
        response["Content-Length"] = str(len(response.content))
        return response
    def process_response(self, request, response):
        """Gzip the response body for clients that accept it.

        omeroweb-specific policy: only 200 responses of at least 8192
        bytes, and only opt-in content types (text or javascript).
        """
        # It's not worth compressing non-OK or really short responses.
        # omeroweb: the tradeoff for less than 8192k of uncompressed text is
        # not worth it most of the times.
        if response.status_code != 200 or len(response.content) < 8192:
            return response

        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header('Content-Encoding'):
            return response

        # omeroweb: we don't want to compress everything, so doing an opt-in
        # approach
        ctype = response.get('Content-Type', '').lower()
        if "javascript" not in ctype and "text" not in ctype:
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        # Older versions of IE have issues with gzipped pages containing
        # either Javascript and PDF.  Reuse the lowercased ctype fetched
        # above rather than fetching the header a second time.
        if "msie" in request.META.get('HTTP_USER_AGENT', '').lower():
            if "javascript" in ctype or ctype == "application/pdf":
                return response

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        response.content = compress_string(response.content)
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(response.content))
        return response
Example #37
0
    def process_response(self, request, response):
        """Gzip-compress the response body when the client accepts gzip.

        Weakens any strong ETag after compression, per RFC 7232.
        """
        # It's not worth attempting to compress really short responses.
        if not response.streaming and len(response.content) < 200:
            return response

        # Avoid gzipping if we've already got a content-encoding.
        if response.has_header('Content-Encoding'):
            return response

        patch_vary_headers(response, ('Accept-Encoding', ))

        ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if not re_accepts_gzip.search(ae):
            return response

        if response.streaming:
            # Delete the `Content-Length` header for streaming content, because
            # we won't know the compressed size until we stream it.
            response.streaming_content = compress_sequence(
                response.streaming_content)
            del response['Content-Length']
        else:
            # Return the compressed content only if it's actually shorter.
            compressed_content = compress_string(response.content)
            if len(compressed_content) >= len(response.content):
                return response
            response.content = compressed_content
            response['Content-Length'] = str(len(response.content))

        # If there is a strong ETag, make it weak to fulfill the requirements
        # of RFC 7232 section-2.1 while also allowing conditional request
        # matches on ETags.
        etag = response.get('ETag')
        if etag and etag.startswith('"'):
            response['ETag'] = 'W/' + etag
        response['Content-Encoding'] = 'gzip'

        return response
Example #38
0
    def process_response(self, request, response):
        """Apply gzip encoding to the response when the client supports it.

        Leaves short, already-encoded, or non-gzip-accepting exchanges
        untouched; weakens a strong ETag after compression (RFC 7232).
        """
        streaming = response.streaming
        if not streaming and len(response.content) < 200:
            # Compressing tiny bodies costs more than it saves.
            return response

        if response.has_header('Content-Encoding'):
            # Body already carries an encoding; don't double-encode.
            return response

        patch_vary_headers(response, ('Accept-Encoding',))

        accepts = request.META.get('HTTP_ACCEPT_ENCODING', '')
        if re_accepts_gzip.search(accepts) is None:
            return response

        if streaming:
            # Length of a compressed stream is unknown up front.
            response.streaming_content = compress_sequence(response.streaming_content)
            del response['Content-Length']
        else:
            shrunk = compress_string(response.content)
            if len(shrunk) >= len(response.content):
                # Compression didn't help; keep the original body.
                return response
            response.content = shrunk
            response['Content-Length'] = str(len(response.content))

        # Downgrade a strong ETag to a weak one so conditional requests
        # still match, per RFC 7232 section-2.1.
        etag = response.get('ETag')
        if etag and etag.startswith('"'):
            response['ETag'] = 'W/' + etag
        response['Content-Encoding'] = 'gzip'

        return response
Example #39
0
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped TinyMCE bundle.

    Answers 304 from cached ETag / Last-Modified validators; otherwise
    concatenates the requested core/theme/plugin/language files.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified source build of each file.
    suffix = "_src" if request.GET.get("suffix", "") == "_src" else ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        raise Http404

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # Conditional-request handling: reply 304 when the client already
    # holds the representation recorded in the cache.
    if cacheData is not None:
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE',
                                                 None)
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    # Add core
    content.append(getFileContents("tiny_mce%s.js" % suffix))

    # Patch loading functions
    content.append("tinyMCE_GZ.start();")

    # Add core languages
    for lang in languages:
        content.append(getFileContents("langs/%s.js" % lang))

    # Add themes
    for theme in themes:
        content.append(
            getFileContents("themes/%s/editor_template%s.js" %
                            (theme, suffix)))

        for lang in languages:
            content.append(
                getFileContents("themes/%s/langs/%s.js" % (theme, lang)))

    # Add plugins
    for plugin in plugins:
        content.append(
            getFileContents("plugins/%s/editor_plugin%s.js" %
                            (plugin, suffix)))

        for lang in languages:
            content.append(
                getFileContents("plugins/%s/langs/%s.js" % (plugin, lang)))

    # Restore loading functions
    content.append("tinyMCE_GZ.end();")

    # Compress
    if compress:
        content = compress_string(''.join(content))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(content))
    else:
        # BUGFIX: join here too -- writing the raw list would emit its
        # repr instead of the concatenated JavaScript.
        content = ''.join(content)

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    # NOTE(review): assumes patch_response_headers set both Last-Modified
    # and ETag on the response; a KeyError here means it did not -- confirm.
    cache.set(cacheKey, {
        'Last-Modified': response['Last-Modified'],
        'ETag': response['ETag'],
    })
    return response
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped TinyMCE bundle.

    Answers 304 from cached ETag / Last-Modified validators; otherwise
    concatenates the requested core/theme/plugin/language files.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified source build of each file.
    suffix = "_src" if request.GET.get("suffix", "") == "_src" else ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        raise Http404

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # Conditional-request handling: reply 304 when the client already
    # holds the representation recorded in the cache.
    if cacheData is not None:
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    # Add core
    content.append(getFileContents("tiny_mce%s.js" % suffix))

    # Patch loading functions
    content.append("tinyMCE_GZ.start();")

    # Add core languages
    for lang in languages:
        content.append(getFileContents("langs/%s.js" % lang))

    # Add themes
    for theme in themes:
        content.append(getFileContents("themes/%s/editor_template%s.js" % (theme, suffix)))

        for lang in languages:
            content.append(getFileContents("themes/%s/langs/%s.js" % (theme, lang)))

    # Add plugins
    for plugin in plugins:
        content.append(getFileContents("plugins/%s/editor_plugin%s.js" % (plugin, suffix)))

        for lang in languages:
            content.append(getFileContents("plugins/%s/langs/%s.js" % (plugin, lang)))

    # Restore loading functions
    content.append("tinyMCE_GZ.end();")

    # Compress
    if compress:
        content = compress_string(''.join(content))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(content))
    else:
        # BUGFIX: join here too -- writing the raw list would emit its
        # repr instead of the concatenated JavaScript.
        content = ''.join(content)

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    # NOTE(review): assumes patch_response_headers set both Last-Modified
    # and ETag on the response; a KeyError here means it did not -- confirm.
    cache.set(cacheKey, {
        'Last-Modified': response['Last-Modified'],
        'ETag': response['ETag'],
    })
    return response
Example #41
0
 def get_db_prep_value(self, value):
     """Pickle, gzip-compress, and base64-encode *value* for storage.

     None passes through unchanged so NULL columns stay NULL.
     """
     if value is not None:
         # base64.encodestring was deprecated and removed in Python 3.9;
         # encodebytes is the same codec under its modern name.
         value = base64.encodebytes(compress_string(pickle.dumps(value)))
     return value
Example #42
0
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped TinyMCE 4.x bundle.

    Returns the loader script for non-JS requests, answers 304 from
    cached validators, and otherwise concatenates core, plugin, theme,
    and language files (normalised to unicode) into one response.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    # NOTE(review): this query-string `files` list is discarded below when
    # `files` is rebuilt from plugins/themes/languages -- confirm intent.
    files = split_commas(request.GET.get("files", ""))
    source = request.GET.get("src", "") == "true"
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        response.write(
            render_to_string("tinymce/tiny_mce_gzip.js",
                             {"base_url": tinymce.settings.JS_BASE_URL}))
        return response

    patch_vary_headers(response, ["Accept-Encoding"])

    now = datetime.utcnow()
    response["Date"] = now.strftime("%a, %d %b %Y %H:%M:%S GMT")

    cacheKey = "|".join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # Conditional-request handling against the cached validators.
    if cacheData is not None:
        if "ETag" in cacheData:
            if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
            if if_none_match == cacheData["ETag"]:
                response.status_code = 304
                response.content = ""
                response["Content-Length"] = "0"
                return response

        if "Last-Modified" in cacheData:
            if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
            if if_modified_since == cacheData["Last-Modified"]:
                response.status_code = 304
                response.content = ""
                response["Content-Length"] = "0"
                return response

    tinyMCEPreInit = {
        "base": tinymce.settings.JS_BASE_URL,
        "suffix": "",
    }
    content.append("var tinyMCEPreInit={};".format(json.dumps(tinyMCEPreInit)))

    # Add core
    files = ["tinymce"]

    # Add core languages
    for lang in languages:
        files.append("langs/{}".format(lang))

    # Add plugins
    for plugin in plugins:
        files.append("plugins/{}/plugin".format(plugin))

        for lang in languages:
            files.append("plugins/{}/langs/{}".format(plugin, lang))

    # Add themes
    for theme in themes:
        files.append("themes/{}/theme".format(theme))

        for lang in languages:
            files.append("themes/{}/langs/{}".format(theme, lang))

    for f in files:
        # Check for unsafe characters
        if not safe_filename_re.match(f):
            continue
        content.append(get_file_contents(f, source=source))

    # Restore loading functions
    content.append(
        'tinymce.each("{}".split(",")'.format(",".join(files)) +
        ', function(f){tinymce.ScriptLoader.markDone(tinyMCE.baseURL+"/"+f+".js");});'
    )

    # Normalise every chunk to unicode regardless of its on-disk encoding.
    unicode_content = []
    for i, c in enumerate(content):
        try:
            unicode_content.append(c.decode("latin-1"))
        except AttributeError:
            # python 3 way
            unicode_content.append(smart_text(c))
        except UnicodeDecodeError:
            try:
                unicode_content.append(c.decode("utf-8"))
            except Exception:
                print("{} is nor latin-1 nor utf-8.".format(files[i]))
                raise

    # Compress
    if compress:
        content = compress_string(b"".join(
            [c.encode("utf-8") for c in unicode_content]))
        response["Content-Encoding"] = "gzip"
        response["Content-Length"] = str(len(content))
    else:
        # BUGFIX: use the normalised text -- writing the raw list would
        # emit its repr instead of the concatenated JavaScript.
        content = "".join(unicode_content)

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    if not response.has_header("Last-Modified"):
        response["Last-Modified"] = http_date()
    cache.set(
        cacheKey,
        {
            "Last-Modified": response["Last-Modified"],
            "ETag": response.get("ETag", "")
        },
    )
    return response
Example #43
0
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped legacy TinyMCE bundle.

    Non-JS requests get the loader template; otherwise core, themes,
    plugins, language packs, and the optional filebrowser shim are
    concatenated into a single text/javascript response.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified source build of each file.
    suffix = "_src" if request.GET.get("suffix", "") == "_src" else ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        response.write(render_to_string('tinymce/tiny_mce_gzip.js', {
            'base_url': tinymce.settings.JS_BASE_URL,
        }, context_instance=RequestContext(request)))
        return response

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    # Add core, with baseURL added
    content.append(get_file_contents("tiny_mce%s.js" % suffix).replace(
            "tinymce._init();", "tinymce.baseURL='%s';tinymce._init();"
            % tinymce.settings.JS_BASE_URL))

    # Patch loading functions
    content.append("tinyMCE_GZ.start();")

    # Add core languages
    for lang in languages:
        content.append(get_file_contents("langs/%s.js" % lang))

    # Add themes
    for theme in themes:
        content.append(get_file_contents("themes/%s/editor_template%s.js"
                % (theme, suffix)))

        for lang in languages:
            content.append(get_file_contents("themes/%s/langs/%s.js"
                    % (theme, lang)))

    # Add plugins
    for plugin in plugins:
        content.append(get_file_contents("plugins/%s/editor_plugin%s.js"
                % (plugin, suffix)))

        for lang in languages:
            content.append(get_file_contents("plugins/%s/langs/%s.js"
                    % (plugin, lang)))

    # Add filebrowser
    if tinymce.settings.USE_FILEBROWSER:
        content.append(render_to_string('tinymce/filebrowser.js', {},
            context_instance=RequestContext(request)).encode("utf-8"))

    # Restore loading functions
    content.append("tinyMCE_GZ.end();")

    # Compress
    if compress:
        content = compress_string(''.join(content))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(content))
    else:
        # BUGFIX: join here too -- writing the raw list would emit its
        # repr instead of the concatenated JavaScript.
        content = ''.join(content)

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    return response
 def get_db_prep_save(self, value, connection):
     """Gzip-compress non-null text before delegating to TextField."""
     payload = value if value is None else compress_string(value.encode("utf-8"))
     return models.TextField.get_db_prep_save(self, payload, connection=connection)
Example #45
0
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped TinyMCE bundle.

    Non-JS requests get the loader template with the compressor config;
    JS requests answer 304 from cached validators or concatenate core,
    themes, plugins, language packs, and the optional filebrowser shim.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified source build of each file.
    suffix = "_src" if request.GET.get("suffix", "") == "_src" else ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        mce_config = tinymce.settings.DEFAULT_CONFIG
        compressor_config = {
            'plugins': mce_config.get('plugins', ''),
            'themes': mce_config.get('theme', 'advanced'),
            'languages': mce_config.get('language', 'en'),
            'diskcache': "true",
            'debug': "false",
        }
        response.write(
            render_to_string('tinymce/tiny_mce_gzip.js', {
                'base_url': tinymce.settings.JS_BASE_URL,
                'compressor_config': compressor_config,
            },
                             context_instance=RequestContext(request)))
        return response

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # Conditional-request handling against the cached validators.
    if cacheData is not None:
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE',
                                                 None)
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    # Add core, with baseURL added
    content.append(get_file_contents("tiny_mce%s.js" % suffix))
    content.append('tinyMCE.baseURL="%s"' % tinymce.settings.JS_BASE_URL)

    # Patch loading functions
    content.append("tinyMCE_GZ.start();")

    # Add core languages
    for lang in languages:
        content.append(get_file_contents("langs/%s.js" % lang))

    # Add themes
    for theme in themes:
        content.append(
            get_file_contents("themes/%s/editor_template%s.js" %
                              (theme, suffix)))

        for lang in languages:
            content.append(
                get_file_contents("themes/%s/langs/%s.js" % (theme, lang)))

    # Add plugins
    for plugin in plugins:
        content.append(
            get_file_contents("plugins/%s/editor_plugin%s.js" %
                              (plugin, suffix)))

        for lang in languages:
            content.append(
                get_file_contents("plugins/%s/langs/%s.js" % (plugin, lang)))

    # Add filebrowser
    if tinymce.settings.USE_FILEBROWSER:
        content.append(
            render_to_string(
                'tinymce/filebrowser.js', {},
                context_instance=RequestContext(request)).encode("utf-8"))

    # Restore loading functions
    content.append("tinyMCE_GZ.end();")

    # BUGFIX: removed the stray Python 2 `print compress` debug statement
    # (a syntax error under Python 3).
    content = '\n'.join(content)
    # Compress
    if compress:
        content = compress_string(content)
        response['Content-Encoding'] = 'gzip'

    response['Content-Length'] = str(len(content))

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    cache.set(
        cacheKey, {
            'Last-Modified': response['Last-Modified'],
            'ETag': response.get('ETag', ''),
        })
    return response
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped legacy TinyMCE bundle.

    Non-JS requests get the loader template; JS requests answer 304 from
    cached validators or concatenate core, themes, plugins, language
    packs, and the optional filebrowser shim.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified source build of each file.
    suffix = "_src" if request.GET.get("suffix", "") == "_src" else ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        response.write(
            render_to_string(
                "tinymce/tiny_mce_gzip.js",
                {"base_url": tinymce.settings.JS_BASE_URL},
                context_instance=RequestContext(request),
            )
        )
        return response

    patch_vary_headers(response, ["Accept-Encoding"])

    now = datetime.utcnow()
    response["Date"] = now.strftime("%a, %d %b %Y %H:%M:%S GMT")

    cacheKey = "|".join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # Conditional-request handling against the cached validators.
    if cacheData is not None:
        if "ETag" in cacheData:
            if_none_match = request.META.get("HTTP_IF_NONE_MATCH", None)
            if if_none_match == cacheData["ETag"]:
                response.status_code = 304
                response.content = ""
                response["Content-Length"] = "0"
                return response

        if "Last-Modified" in cacheData:
            if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE", None)
            if if_modified_since == cacheData["Last-Modified"]:
                response.status_code = 304
                response.content = ""
                response["Content-Length"] = "0"
                return response

    # Add core, with baseURL added
    content.append(
        get_file_contents("tiny_mce%s.js" % suffix).replace(
            "tinymce._init();", "tinymce.baseURL='%s';tinymce._init();" % tinymce.settings.JS_BASE_URL
        )
    )

    # Patch loading functions
    content.append("tinyMCE_GZ.start();")

    # Add core languages
    for lang in languages:
        content.append(get_file_contents("langs/%s.js" % lang))

    # Add themes
    for theme in themes:
        content.append(get_file_contents("themes/%s/editor_template%s.js" % (theme, suffix)))

        for lang in languages:
            content.append(get_file_contents("themes/%s/langs/%s.js" % (theme, lang)))

    # Add plugins
    for plugin in plugins:
        content.append(get_file_contents("plugins/%s/editor_plugin%s.js" % (plugin, suffix)))

        for lang in languages:
            content.append(get_file_contents("plugins/%s/langs/%s.js" % (plugin, lang)))

    # Add filebrowser
    if tinymce.settings.USE_FILEBROWSER:
        content.append(
            render_to_string("tinymce/filebrowser.js", {}, context_instance=RequestContext(request)).encode("utf-8")
        )

    # Restore loading functions
    content.append("tinyMCE_GZ.end();")

    # Compress
    if compress:
        content = compress_string("".join(content))
        response["Content-Encoding"] = "gzip"
        response["Content-Length"] = str(len(content))
    else:
        # BUGFIX: join here too -- writing the raw list would emit its
        # repr instead of the concatenated JavaScript.
        content = "".join(content)

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    # NOTE(review): assumes patch_response_headers set both Last-Modified
    # and ETag on the response; a KeyError here means it did not -- confirm.
    cache.set(cacheKey, {"Last-Modified": response["Last-Modified"], "ETag": response["ETag"]})
    return response
Example #47
0
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped bundle of TinyMCE scripts.

    GET parameters select the bundle contents (``plugins``, ``languages``,
    ``themes``, ``suffix``); ``js=true`` asks for the script payload itself,
    anything else returns the rendered loader stub.  ETag / Last-Modified
    values remembered in the cache allow conditional 304 responses.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified sources; anything else the default build.
    suffix = request.GET.get("suffix", "") == "_src" and "_src" or ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        # Loader stub: a small script that requests the real payload.
        response.write(render_to_string('tinymce/tiny_mce_gzip.js', {
            'base_url': tinymce_settings.JS_BASE_URL,
        }, context_instance=RequestContext(request)))
        return response

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    if cacheData is not None:  # was the unidiomatic "not cacheData is None"
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    # (stray trailing semicolon removed)
    content.append("var tinyMCEPreInit={base:'%s',suffix:''};" % tinymce_settings.JS_BASE_URL)

    # Add core
    files = ["tiny_mce"]

    # Add core languages
    for lang in languages:
        files.append("langs/%s" % lang)

    # Add plugins
    for plugin in plugins:
        files.append("plugins/%s/editor_plugin%s" % (plugin, suffix))

        for lang in languages:
            files.append("plugins/%s/langs/%s" % (plugin, lang))

    # Add themes
    for theme in themes:
        files.append("themes/%s/editor_template%s" % (theme, suffix))

        for lang in languages:
            files.append("themes/%s/langs/%s" % (theme, lang))

    for f in files:
        # Check for unsafe characters (rejects path traversal in file names)
        if not safe_filename_re.match(f):
            continue
        content.append(get_file_contents("%s.js" % f))

    # Restore loading functions
    content.append('tinymce.each("%s".split(","), function(f){'
                   'tinymce.ScriptLoader.markDone(tinyMCE.baseURL+'
                   '"/"+f+".js");});' % ",".join(files))

    unicode_content = []
    for i, c in enumerate(content):
        try:
            unicode_content.append(c.decode('latin-1'))
        except UnicodeDecodeError:
            try:
                unicode_content.append(c.decode('utf-8'))
            except Exception:  # narrowed from a bare "except:"
                print("%s is neither latin-1 nor utf-8." % files[i])
                raise

    # Compress
    if compress:
        # b''.join: the encoded chunks are byte strings, so joining them
        # with a text '' would raise TypeError on Python 3.
        content = compress_string(b''.join([c.encode('utf-8')
                                            for c in unicode_content]))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(content))

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    cache.set(cacheKey, {
        'Last-Modified': response['Last-Modified'],
        'ETag': response.get('ETag', ''),
    })
    return response
Example #48
0
def gzip_compressor(request):
    """Serve a concatenated, optionally gzipped bundle of TinyMCE scripts.

    GET parameters select the bundle contents (``plugins``, ``languages``,
    ``themes``, ``suffix``); ``js=true`` asks for the script payload itself,
    anything else returns the rendered loader stub.  ETag / Last-Modified
    values remembered in the cache allow conditional 304 responses.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified sources; anything else the default build.
    suffix = request.GET.get("suffix", "") == "_src" and "_src" or ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        # Loader stub: a small script that requests the real payload.
        response.write(render_to_string(template_name='tinymce/tiny_mce_gzip.js', context={
            'base_url': tinymce_settings.JS_BASE_URL,
        }, request=request))
        return response

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    if cacheData is not None:
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    content.append("var tinyMCEPreInit={base:'%s',suffix:''};" % tinymce_settings.JS_BASE_URL)

    # Add core
    files = ["tiny_mce"]

    # Add core languages
    for lang in languages:
        files.append("langs/%s" % lang)

    # Add plugins
    for plugin in plugins:
        files.append("plugins/%s/editor_plugin%s" % (plugin, suffix))

        for lang in languages:
            files.append("plugins/%s/langs/%s" % (plugin, lang))

    # Add themes
    for theme in themes:
        files.append("themes/%s/editor_template%s" % (theme, suffix))

        for lang in languages:
            files.append("themes/%s/langs/%s" % (theme, lang))

    for f in files:
        # Check for unsafe characters (rejects path traversal in file names)
        if not safe_filename_re.match(f):
            continue
        content.append(get_file_contents("%s.js" % f))

    # Restore loading functions
    content.append('tinymce.each("%s".split(","), function(f){'
                   'tinymce.ScriptLoader.markDone(tinyMCE.baseURL+'
                   '"/"+f+".js");});' % ",".join(files))

    unicode_content = []
    for i, c in enumerate(content):
        try:
            unicode_content.append(c.decode('latin-1'))
        except UnicodeDecodeError:
            try:
                unicode_content.append(c.decode('utf-8'))
            except Exception:  # narrowed from a bare "except:"
                print("%s is neither latin-1 nor utf-8." % files[i])
                raise

    # Compress
    if compress:
        # b''.join: c.encode() yields bytes, and this code path is clearly
        # Python 3 (render_to_string(..., request=...)); joining bytes with
        # a text '' separator raises TypeError there.
        content = compress_string(b''.join([c.encode('utf-8')
                                            for c in unicode_content]))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(content))

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    cache.set(cacheKey, {
        'Last-Modified': response['Last-Modified'],
        'ETag': response.get('ETag', ''),
    })
    return response
Example #49
0
def gzip_compressor(request):
    """Return the TinyMCE loader stub, or (for ``js=true``) the concatenated
    and optionally gzipped editor script bundle selected by the
    ``plugins``/``languages``/``themes``/``suffix`` GET parameters."""
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    suffix = "_src" if request.GET.get("suffix", "") == "_src" else ""

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        # Not the payload request: hand back the small loader script.
        stub = render_to_string('tinymce/tiny_mce_gzip.js', {
            'base_url': tinymce.settings.JS_BASE_URL,
        }, context_instance=RequestContext(request))
        response.write(stub)
        return response

    patch_vary_headers(response, ['Accept-Encoding'])
    response['Date'] = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')

    # Core first, with the base URL injected just ahead of tinymce's init,
    # then the patched loading-function prologue.
    pieces = [
        get_file_contents("tiny_mce%s.js" % suffix).replace(
            "tinymce._init();",
            "tinymce.baseURL='%s';tinymce._init();" %
            tinymce.settings.JS_BASE_URL),
        "tinyMCE_GZ.start();",
    ]

    # Core languages, then themes (with their languages), then plugins.
    pieces.extend(get_file_contents("langs/%s.js" % lang)
                  for lang in languages)

    for theme in themes:
        pieces.append(get_file_contents("themes/%s/editor_template%s.js" %
                                        (theme, suffix)))
        pieces.extend(get_file_contents("themes/%s/langs/%s.js" % (theme, lang))
                      for lang in languages)

    for plugin in plugins:
        pieces.append(get_file_contents("plugins/%s/editor_plugin%s.js" %
                                        (plugin, suffix)))
        pieces.extend(get_file_contents("plugins/%s/langs/%s.js" % (plugin, lang))
                      for lang in languages)

    # Optional filebrowser integration.
    if tinymce.settings.USE_FILEBROWSER:
        pieces.append(render_to_string(
            'tinymce/filebrowser.js', {},
            context_instance=RequestContext(request)).encode("utf-8"))

    # Restore the loading functions.
    pieces.append("tinyMCE_GZ.end();")

    if compress:
        pieces = compress_string(''.join(pieces))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(pieces))

    response.write(pieces)
    patch_response_headers(response, 3600 * 24 * 10)
    return response
Example #50
0
def gzip_compressor(request):
    """Serve either the TinyMCE loader stub (``js`` != ``true``) or the
    concatenated, optionally gzip-compressed bundle of editor scripts
    selected by the ``plugins``/``languages``/``themes`` GET parameters.

    ETag / Last-Modified values remembered in the cache allow conditional
    304 responses on repeat requests for the same bundle.
    """
    plugins = split_commas(request.GET.get('plugins', ''))
    languages = split_commas(request.GET.get('languages', ''))
    themes = split_commas(request.GET.get('themes', ''))
    isJS = request.GET.get('js', '') == 'true'
    compress = request.GET.get('compress', 'true') == 'true'
    # '_src' picks the unminified sources; anything else the default build.
    suffix = request.GET.get('suffix', '') == '_src' and '_src' or ''
    content = []

    response = HttpResponse()
    response['Content-Type'] = 'text/javascript'

    if not isJS:
        # Not the payload request: render the small loader script instead.
        response.write(render_to_string('tinymce/tiny_mce_gzip.js', {
            'base_url': tinymce.settings.JS_BASE_URL,
        }))
        return response

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    # One cache entry per distinct bundle selection.
    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # Answer conditional requests (If-None-Match / If-Modified-Since) with
    # an empty 304 when the cached validators match.
    if cacheData is not None:
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    # Bootstrap settings consumed by the TinyMCE core before it initializes.
    tinyMCEPreInit = {
        'base': tinymce.settings.JS_BASE_URL,
        'suffix': '',
    }
    content.append('var tinyMCEPreInit={!s};'.format(
        json.dumps(tinyMCEPreInit)
    ))

    # Add core
    files = ['tiny_mce']

    # Add core languages
    for lang in languages:
        files.append('langs/{!s}'.format(lang))

    # Add plugins
    for plugin in plugins:
        files.append('plugins/{!s}/editor_plugin{!s}'.format(plugin, suffix))

        for lang in languages:
            files.append('plugins/{!s}/langs/{!s}'.format(plugin, lang))

    # Add themes
    for theme in themes:
        files.append('themes/{!s}/editor_template{!s}'.format(theme, suffix))

        for lang in languages:
            files.append('themes/{!s}/langs/{!s}'.format(theme, lang))

    for f in files:
        # Check for unsafe characters (rejects path traversal in file names)
        if not safe_filename_re.match(f):
            continue
        content.append(get_file_contents('{!s}.js'.format(f)))

    # Restore loading functions
    content.append('tinymce.each("{!s}".split(","), function(f){{'
                   'tinymce.ScriptLoader.markDone(tinyMCE.baseURL+'
                   '"/"+f+".js");}});'.format(','.join(files)))

    # Normalize every chunk to text.  On Python 2 the chunks are byte
    # strings (latin-1 with a utf-8 fallback); on Python 3 str has no
    # .decode, so the AttributeError branch converts via smart_text.
    unicode_content = []
    for i, c in enumerate(content):
        try:
            unicode_content.append(c.decode('latin-1'))
        except AttributeError:
            # python 3 way
            unicode_content.append(smart_text(c))
        except UnicodeDecodeError:
            try:
                unicode_content.append(c.decode('utf-8'))
            except Exception:
                print('{!s} is nor latin-1 nor utf-8.'.format(files[i]))
                raise

    # Compress
    if compress:
        content = compress_string(b''.join([c.encode('utf-8')
                                           for c in unicode_content]))
        response['Content-Encoding'] = 'gzip'
        response['Content-Length'] = str(len(content))

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    if not response.has_header('Last-Modified'):
        # Last-Modified not set since Django 1.11
        response['Last-Modified'] = http_date()
    cache.set(cacheKey, {
        'Last-Modified': response['Last-Modified'],
        'ETag': response.get('ETag', ''),
    })
    return response
Example #51
0
 def __init__(self, importer):
     """Snapshot *importer*'s results: gzipped JSON, summary, and report."""
     results = importer.import_results
     self.json = compress_string(results.to_json())
     self.detailed_summary = results.get_detailed_summary()
     self.report = results.display_report()
Example #52
0
def gzip_compressor(request):
    """Serve the TinyMCE loader stub or a concatenated (optionally gzipped)
    bundle of editor core, language, theme and plugin scripts.

    ETag / Last-Modified values remembered in the cache allow conditional
    304 responses on repeat requests for the same bundle.
    """
    plugins = split_commas(request.GET.get("plugins", ""))
    languages = split_commas(request.GET.get("languages", ""))
    themes = split_commas(request.GET.get("themes", ""))
    isJS = request.GET.get("js", "") == "true"
    compress = request.GET.get("compress", "true") == "true"
    # "_src" selects the unminified sources; anything else the default build.
    suffix = request.GET.get("suffix", "") == "_src" and "_src" or ""
    content = []

    response = HttpResponse()
    response["Content-Type"] = "text/javascript"

    if not isJS:
        # Render the loader stub, seeded with the site's default editor config.
        mce_config = tinymce.settings.DEFAULT_CONFIG
        compressor_config = {
            'plugins': mce_config.get('plugins', ''),
            'themes': mce_config.get('theme', 'advanced'),
            'languages': mce_config.get('language', 'en'),
            'diskcache': "true",
            'debug': "false",
        }
        response.write(render_to_string('tinymce/tiny_mce_gzip.js', {
            'base_url': tinymce.settings.JS_BASE_URL,
            'compressor_config': compressor_config,
        }, context_instance=RequestContext(request)))
        return response

    patch_vary_headers(response, ['Accept-Encoding'])

    now = datetime.utcnow()
    response['Date'] = now.strftime('%a, %d %b %Y %H:%M:%S GMT')

    cacheKey = '|'.join(plugins + languages + themes)
    cacheData = cache.get(cacheKey)

    # "in" replaces dict.has_key() (removed in Python 3); "is not None"
    # replaces the unidiomatic "not cacheData is None".
    if cacheData is not None:
        if 'ETag' in cacheData:
            if_none_match = request.META.get('HTTP_IF_NONE_MATCH', None)
            if if_none_match == cacheData['ETag']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

        if 'Last-Modified' in cacheData:
            if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE', None)
            if if_modified_since == cacheData['Last-Modified']:
                response.status_code = 304
                response.content = ''
                response['Content-Length'] = '0'
                return response

    # Add core, with baseURL added
    content.append(get_file_contents("tiny_mce%s.js" % suffix))
    content.append('tinyMCE.baseURL="%s"' % tinymce.settings.JS_BASE_URL)

    # Patch loading functions
    content.append("tinyMCE_GZ.start();")

    # Add core languages
    for lang in languages:
        content.append(get_file_contents("langs/%s.js" % lang))

    # Add themes
    for theme in themes:
        content.append(get_file_contents("themes/%s/editor_template%s.js"
                % (theme, suffix)))

        for lang in languages:
            content.append(get_file_contents("themes/%s/langs/%s.js"
                    % (theme, lang)))

    # Add plugins
    for plugin in plugins:
        content.append(get_file_contents("plugins/%s/editor_plugin%s.js"
                % (plugin, suffix)))

        for lang in languages:
            content.append(get_file_contents("plugins/%s/langs/%s.js"
                    % (plugin, lang)))

    # Add filebrowser
    if tinymce.settings.USE_FILEBROWSER:
        content.append(render_to_string('tinymce/filebrowser.js', {},
            context_instance=RequestContext(request)).encode("utf-8"))

    # Restore loading functions
    content.append("tinyMCE_GZ.end();")

    # (leftover "print compress" debug statement removed)
    content = '\n'.join(content)
    # Compress
    if compress:
        content = compress_string(content)
        response['Content-Encoding'] = 'gzip'

    response['Content-Length'] = str(len(content))

    response.write(content)
    timeout = 3600 * 24 * 10
    patch_response_headers(response, timeout)
    cache.set(cacheKey, {
        'Last-Modified': response['Last-Modified'],
        'ETag': response.get('ETag', ''),
    })
    return response
Example #53
0
 def get_db_prep_value(self, value, connection=None, prepared=False):
     """Serialize *value* for storage: pickle, gzip-compress, then base64.

     ``None`` passes through unchanged so NULL columns stay NULL.
     """
     if value is None:
         return None
     pickled = pickle.dumps(value)
     return base64.encodestring(compress_string(pickled))