Beispiel #1
0
def comments(request, page=1):
    """Render a paginated list of non-removed comments, newest first.

    :param request: the incoming HttpRequest
    :param page: 1-based page number (str or int); 20 comments per page
    :returns: a streaming rendering of index.html, or a redirect to
              /comments/ when the requested page is past the end
    """
    per_page = 20
    page = int(page)  # normalize once instead of repeated int() casts
    slice_start = per_page * page - per_page
    slice_end = per_page * page

    queryset = Comments.objects.filter(is_removed=False).order_by('-posted')
    # COUNT(*) query instead of len(), which would load every row.
    amount = queryset.count()
    rows_range = int(math.ceil(amount / float(per_page)))   # amount of rows
    comments = queryset[slice_start:slice_end]
    amount_this_page = len(comments)

    if amount_this_page == 0 and page != 1:
        return HttpResponseRedirect("/comments/")

    # Guard the empty-first-page case: comments[0] would raise IndexError.
    newest_id = comments[0].id if amount_this_page else 0
    last_comment_id_seen = request.COOKIES.get('last_comment_id_seen', newest_id)

    template = loader.get_template('index.html')
    template_args = {
        'content': 'comments.html',
        'request': request,
        'title': ' - Comments',
        'comments': comments,
        'amount': amount,
        'amount_this_page': amount_this_page,
        'range': [i + 1 for i in range(rows_range)],
        'page': page,
        'last_comment_id_seen': int(last_comment_id_seen),
    }
    response = StreamingHttpResponse(template.render(template_args, request))
    if page == 1 and amount_this_page:
        # Remember the newest comment the visitor has seen (50-day cookie).
        response.set_cookie('last_comment_id_seen', comments[0].id, max_age=4320000)
    return response
Beispiel #2
0
 def dispatch(self, request, *args, **kwargs):
     """Stream queued liveblog events to the client as server-sent events.

     :returns: an un-cacheable text/event-stream response fed by self._queue()
     """
     # `mimetype=` was removed from Django responses; `content_type=` is the
     # supported keyword. The old `response.flush()` call was a no-op and no
     # longer exists on modern response objects, so it is dropped.
     response = StreamingHttpResponse(self._queue(),
                                      content_type='text/event-stream')
     response['Cache-Control'] = 'no-cache'
     response['Software'] = 'opps-liveblogging'
     return response
Beispiel #3
0
def download_key(request, report, file_name):
    """Serve a stored report file owned by the requesting user.

    With a file name: streams the PnL image or an arbitrary file, or returns
    the performance CSV parsed into JSON rows. Without a file name: returns a
    serialized listing of the report's files.

    :raises Http404: when no matching file record exists
    """
    user = request.user
    if file_name:
        files = FileRecord.objects.filter(Q(author=user) & Q(report__report_id=report) & Q(name=file_name))
        if files.exists():
            record = files[0]
            if file_name == 'output_pnl.png':
                ret = StreamingHttpResponse(record.content)
                # The file is a .png, so serve it as PNG (was image/jpeg).
                ret['Content-Type'] = 'image/png'
            elif file_name == 'output_performance.csv':
                # Parse the whitespace-separated table into row dicts keyed
                # by the header columns (first column renamed to 'period').
                lines = record.content.split('\n')
                regex = re.compile(r'\s+')  # raw string: '\s' is an invalid escape otherwise
                columns = regex.split(lines[0].strip())
                columns[0] = 'period'
                ret = [dict(zip(columns, regex.split(line.strip())))
                       for line in lines[1:]]
                return HttpResponse(json.dumps({'ret': ret}), content_type="application/json")
            else:
                ret = StreamingHttpResponse(record.content)
                ret['Content-Type'] = 'application/octet-stream'
            return ret
        else:
            raise Http404("File Not Found")
    else:
        serializer = FileSerializer(FileRecord.objects.filter(Q(author=user) & Q(report__alpha_name=report)), many=True)
        return Response(serializer.data)
def generate_response(filepath, content_type, filename=None):
    """Build a streaming attachment response for the file at *filepath*.

    :param filepath: storage path of the file to stream
    :param content_type: MIME type for the response
    :param filename: download name; defaults to the file's base name
    """
    attachment_name = filename if filename is not None else os.path.basename(filepath)
    response = StreamingHttpResponse(export_iterator(filepath), content_type=content_type)
    response['Content-Length'] = default_storage.size(filepath)
    response['Content-Disposition'] = "attachment; filename=%s" % attachment_name
    # Cookies consumed by the client-side download-complete detector.
    for cookie_key, cookie_value in (('fileDownload', 'true'), ('path', '/')):
        response.set_cookie(key=cookie_key, value=cookie_value)
    return response
Beispiel #5
0
def _download_file(request, file_path):
    """Stream *file_path* as a forced download for an authorized user.

    NOTE(review): when check_access() is falsy this function implicitly
    returns None, which is not a valid view return value — confirm the
    caller handles that. file_path is used unsanitized in the path and
    the Content-Disposition header; verify callers constrain it.
    """

    if check_access(request):
            # Force a download dialog rather than in-browser display.
            response = StreamingHttpResponse(content_type='application/force-download')
            response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(file_path)
            # The handle is consumed lazily by the chunk generator; presumably
            # read_file_chunkwise closes it when exhausted — TODO confirm.
            file_obj = open(file_path, 'rb')
            response.streaming_content = read_file_chunkwise(file_obj)
            return response
Beispiel #6
0
 def finalize_response(self, request, response, *args, **kwargs):
     """Finalize the DRF response, swapping in a streaming response for
     GDAL-rendered raster formats while preserving the negotiated headers."""
     response = super(BaseRasterView, self).finalize_response(
         request, response, *args, **kwargs)
     # Use streaming responses for GDAL formats.
     if isinstance(response.accepted_renderer,
                   renderers.gdal.BaseGDALRenderer):
         # NOTE(review): `_headers` is a private Django attribute that was
         # removed in Django 3.2 (replaced by `response.headers`) — this
         # copy-over only works on older Django versions; confirm target.
         headers = response._headers
         response = StreamingHttpResponse(response.rendered_content)
         response._headers = headers
     return response
Beispiel #7
0
    def dispatch(self, request, *args, **kwargs):
        """Look up the liveblog event for the URL slug and stream its queued
        updates as a CORS-enabled server-sent-events response.

        NOTE(review): self.model.objects.get() raises DoesNotExist (500) for
        an unknown slug — confirm that is intended rather than a 404.
        """
        self.slug = self.kwargs.get('slug')
        self.event_obj = self.model.objects.get(
                channel_long_slug=self.get_long_slug(), slug=self.slug)

        # `mimetype=` was removed from Django responses; use `content_type=`.
        # The old `response.flush()` call was a no-op and is dropped.
        response = StreamingHttpResponse(self._queue(),
                                         content_type='text/event-stream')
        response['Cache-Control'] = 'no-cache'
        response['Software'] = 'opps-liveblogging'
        response['Access-Control-Allow-Origin'] = '*'
        return response
Beispiel #8
0
 def dispatch(self, request, *args, **kwargs):
     """Resolve the handler for the request's HTTP method and stream the
     handler's output to the client.

     :returns: an un-cacheable streaming response fed by self._iterator()
     """
     if request.method.lower() in self.http_method_names:
         handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
     else:
         handler = self.http_method_not_allowed
     self.request = request
     self.args = args
     self.kwargs = kwargs
     # NOTE(review): the value comes from the 'channel' kwarg but is stored
     # as "timeout" — confirm the attribute name is intentional.
     self.timeout = self.kwargs.get('channel')
     # A plain HttpResponse has no streaming_content; assigning it is a
     # silent no-op on modern Django, leaving the body empty. Use a real
     # streaming response so the iterator is actually consumed.
     response = StreamingHttpResponse(self._iterator(handler))
     response['Cache-Control'] = 'no-cache'
     return response
def get_django_response(proxy_response, strict_cookies=False):
    """Translate a urllib3 proxy response into a Django response.

    Large bodies (per should_stream) become a StreamingHttpResponse that
    relays the upstream stream in DEFAULT_AMT chunks; smaller bodies are
    buffered into a plain HttpResponse. Headers and valid Set-Cookie
    entries are copied over.

    :param proxy_response: An Instance of urllib3.response.HTTPResponse that
                           will create an appropriate response
    :param strict_cookies: Whether to only accept RFC-compliant cookies
    :returns: Returns an appropriate response based on the proxy_response
              content-length
    """
    headers = proxy_response.headers
    logger.debug('Proxy response headers: %s', headers)

    content_type = headers.get('Content-Type')
    logger.debug('Content-Type: %s', content_type)

    if should_stream(proxy_response):
        logger.info('Content-Length is bigger than %s', DEFAULT_AMT)
        response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT),
                                         status=proxy_response.status,
                                         content_type=content_type)
    else:
        response = HttpResponse(proxy_response.data or b'',
                                status=proxy_response.status,
                                content_type=content_type)

    logger.info('Normalizing response headers')
    set_response_headers(response, headers)
    logger.debug('Response headers: %s', getattr(response, '_headers'))

    logger.info('Checking for invalid cookies')
    for raw_cookie in headers.getlist('set-cookie'):
        parsed = cookie_from_string(raw_cookie,
                                    strict_cookies=strict_cookies)
        # cookie_from_string returns None for an invalid cookie
        if parsed:
            response.set_cookie(**parsed)

    logger.debug('Response cookies: %s', response.cookies)

    return response
Beispiel #10
0
def get_django_response(proxy_response):
    """Translate a urllib3 proxy response into a Django response.

    Large bodies (per should_stream) become a StreamingHttpResponse that
    relays the upstream stream in DEFAULT_AMT chunks; smaller bodies are
    buffered into a plain HttpResponse. All headers not listed in
    IGNORE_HEADERS, plus valid Set-Cookie entries, are copied over.

    :param proxy_response: An Instance of urllib3.response.HTTPResponse that
                           will create an appropriate response
    :returns: Returns an appropriate response based on the proxy_response
              content-length
    """
    headers = proxy_response.headers
    logger.debug('Proxy response headers: %s', headers)

    content_type = headers.get('Content-Type')
    logger.debug('Content-Type: %s', content_type)

    if should_stream(proxy_response):
        logger.info('Content-Length is bigger than %s', DEFAULT_AMT)
        response = StreamingHttpResponse(proxy_response.stream(DEFAULT_AMT),
                                         status=proxy_response.status,
                                         content_type=content_type)
    else:
        response = HttpResponse(proxy_response.data or b'',
                                status=proxy_response.status,
                                content_type=content_type)

    logger.info("Normalizing headers that aren't in IGNORE_HEADERS")
    for name, value in headers.items():
        if name.lower() in IGNORE_HEADERS:
            continue
        response[name.title()] = value

    logger.debug('Response headers: %s', getattr(response, '_headers'))

    logger.info('Checking for invalid cookies')
    for raw_cookie in headers.getlist('set-cookie'):
        parsed = cookie_from_string(raw_cookie)
        # cookie_from_string returns None for an invalid cookie
        if parsed:
            response.set_cookie(**parsed)

    logger.debug('Response cookies: %s', response.cookies)

    return response
Beispiel #11
0
    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        """Replace the placeholder open-graph description in the rendered
        page with the text of its first real paragraph.

        Only acts when the request carries a placeholder_open_graph_description
        attribute; the response must not be streaming in that case.
        """
        def alter_content(content: bytes) -> bytes:
            str_content = content.decode("utf-8")
            bs = BeautifulSoup(str_content, features='lxml')
            # Skip any admonition (warning) blocks, since they're
            # usually something about users needing to be an
            # organization administrator, and not useful for
            # describing the page.
            for tag in bs.find_all('div', class_="admonition"):
                tag.clear()

            # Find the first paragraph after that, and convert it from HTML to text.
            first_paragraph = bs.find('p')
            if first_paragraph is None:
                # No paragraph to describe the page with; leave content as-is
                # (previously this raised AttributeError on None).
                return content
            first_paragraph_text = first_paragraph.text.replace('\n', ' ')
            return content.replace(request.placeholder_open_graph_description.encode("utf-8"),
                                   first_paragraph_text.encode("utf-8"))

        # The unused wrap_streaming_content helper was dead code (the
        # streaming branch is excluded by the assert below) and was removed.
        if getattr(request, "placeholder_open_graph_description", None) is not None:
            assert not response.streaming
            response.content = alter_content(response.content)
        return response
Beispiel #12
0
    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        """Rewrite the response body via alter_content() when the request
        carries a placeholder open-graph description."""
        placeholder = getattr(request, "placeholder_open_graph_description", None)
        if placeholder is not None:
            # Rewriting requires a buffered (non-streaming) body.
            assert not response.streaming
            response.content = alter_content(request, response.content)
        return response
Beispiel #13
0
def match(request, match_pk, mode='response'):
    """Return match data in the requested representation.

    :mode:
       response -  Django response JSON
       json - Dumped JSON object
       python - Pure Python Dictionary
       sse - server-sent-events stream fed from the match's redis channel
    """
    data = data_match(match_pk)

    def _json_response():
        # Prefer JSONP when a 'callback' query parameter is present;
        # request.GET[...] raises a KeyError subclass when it is missing.
        try:
            response = JSONPResponse(
                data, {}, response_mimetype(request), request.GET['callback'])
        except KeyError:
            response = JSONResponse(data, {}, response_mimetype(request))
        return response

    if mode == 'response':
        response = _json_response()
        response['Content-Disposition'] = 'inline; filename=files.json'
    elif mode == 'sse':
        def _sse_queue():
            redis = Db('goalservematch', match_pk)
            pubsub = redis.object().pubsub()
            pubsub.subscribe(redis.key)
            # pubsub.listen() blocks indefinitely, so simply relay each
            # published message as an SSE data frame. (The old bare `yield`
            # after the loop was unreachable and would have emitted None.)
            for m in pubsub.listen():
                if m['type'] == 'message':
                    yield u"data: {}\n\n".format(m['data'].decode('utf-8'))

        # `mimetype=` was removed from Django responses; use `content_type=`.
        response = StreamingHttpResponse(_sse_queue(),
                                         content_type='text/event-stream')
        response['Cache-Control'] = 'no-cache'
        response['Software'] = 'opps-goalserve'
    elif mode == 'json':
        response = _json_response()
    elif mode == 'python':
        response = data
    else:
        response = \
            "Please specify the mode argument as python, json or response"

    return response
Beispiel #14
0
def export_file(request, pk, file_name):
    """
    Allows authorized user to export a file.

    Adapted from https://github.com/ASKBOT/django-directory

    :raises Http404: when the file does not exist on disk
    :raises PermissionDenied: when the requester neither owns the export
        nor is a superuser
    """
    export = get_object_or_404(Export, pk=pk)
    if (request.user == export.user) or request.user.is_superuser:
        filepath = os.path.join(export.path, file_name)
        log.debug("Exporting %s", filepath)
        if os.path.exists(filepath):
            response = StreamingHttpResponse()
            response['Content-Disposition'] = 'attachment; filename=%s' % file_name
            # Open in binary mode: the file is streamed verbatim, and text
            # mode would corrupt non-UTF-8 bytes / translate newlines.
            file_obj = open(filepath, 'rb')
            response.streaming_content = _read_file_chunkwise(file_obj)
            return response
        else:
            raise Http404
    else:
        raise PermissionDenied
Beispiel #15
0
def download_view(request):
    """Render the download form on GET; on POST, stream a random binary
    payload of the requested size (``mb`` MB, with ``ops`` extra work units).

    Passing mb=-1 deliberately raises ValueError for error-path testing.
    """
    if request.method == "GET":
        return render(request, "download.html")

    if request.POST["mb"] == "-1":
        # Intentionally generate an exception.
        _LOG.info("mb=-1 passed in.")
        print(math.sqrt(-1))

    # 1 KiB building block repeated by the generator below.
    buffer1k = _random_str(1023) + "/"
    mb = max(int(request.POST["mb"]), 1)
    ops = int(request.POST.get("ops", 0))
    _LOG.info("Start generating %dMB data now (with ops=%d)...", mb, ops)
    response = StreamingHttpResponse()
    response["Content-Type"] = "application/binary"
    response["Content-Disposition"] = 'attachment; filename="random{0}-{1}MB.bin"'.format(random.randint(10, 99), mb)
    response["Content-Length"] = str(1024 * 1024 * mb)
    response.streaming_content = _repeat_and_wait(buffer1k, 1024 * mb, ops)
    # Use the module logger consistently (was the root `logging` logger).
    _LOG.info("Passing the generator to the response.")
    return response
Beispiel #16
0
def download_file(request, file_name):
    """allows authorized user to download a given file

    :raises PermissionDenied: if file_name contains a path separator
    :raises Http404: if the file is not present in the served directory
    """
    if os.path.sep in file_name:
        raise PermissionDenied()

    if check_access(request):
        directory = settings.DIRECTORY_DIRECTORY

        #make sure that file exists within current directory
        files = get_file_names(directory)
        if file_name in files:
            file_path = os.path.join(directory, file_name)
            # StreamingHttpResponse has no `mimetype` kwarg (TypeError);
            # `content_type` is the supported keyword.
            response = StreamingHttpResponse(content_type='application/force-download')
            response['Content-Disposition'] = 'attachment; filename=%s' % file_name
            # Binary mode for a verbatim download; reuse the path computed above.
            file_obj = open(file_path, 'rb')
            response.streaming_content = read_file_chunkwise(file_obj)
            return response
        else:
            raise Http404
Beispiel #17
0
def get_download_response(payload, content_length, content_format, filename, request=None):
    """
    Build a (possibly ranged) streaming download response for *payload*.

    :param payload: File like object.
    :param content_length: Size of payload in bytes
    :param content_format: ``couchexport.models.Format`` instance
    :param filename: Name of the download
    :param request: The request. Used to determine if a range response should be given.
    :return: HTTP response
    """
    # Parse the Range header if one was sent; a malformed header is ignored
    # and the full payload is served instead.
    ranges = None
    if request and "HTTP_RANGE" in request.META:
        try:
            ranges = parse_range_header(request.META['HTTP_RANGE'], content_length)
        except ValueError:
            pass

    # Only single-range requests are honored; multi-range falls back to
    # serving the whole body.
    if ranges and len(ranges.ranges) != 1:
        ranges = None

    response = StreamingHttpResponse(content_type=content_format.mimetype)
    if content_format.download:
        response['Content-Disposition'] = safe_filename_header(filename)

    response["Content-Length"] = content_length
    response["Accept-Ranges"] = "bytes"

    if ranges:
        start, stop = ranges.ranges[0]
        if stop is not None and stop > content_length:
            # requested range not satisfiable
            return HttpResponse(status=416)

        # stop=None means "to end of file".
        response.streaming_content = RangedFileWrapper(payload, start=start, stop=stop or float("inf"))
        end = stop or content_length
        # Content-Range's end offset is inclusive, hence end - 1.
        response["Content-Range"] = "bytes %d-%d/%d" % (start, end - 1, content_length)
        response["Content-Length"] = end - start
        response.status_code = 206  # Partial Content
    else:
        response.streaming_content = FileWrapper(payload)

    return response
Beispiel #18
0
    def test_streaming_response(self):
        """Files handed to a StreamingHttpResponse are closed with it."""
        filename = os.path.join(os.path.dirname(__file__), 'abc.txt')

        # A single content file stays open until the response is closed.
        fh = open(filename)
        resp = StreamingHttpResponse(fh)
        self.assertFalse(fh.closed)
        resp.close()
        self.assertTrue(fh.closed)

        # When the content is reassigned, both the original and the
        # replacement file are closed together with the response.
        first = open(filename)
        second = open(filename)
        resp = StreamingHttpResponse(first)
        resp.streaming_content = second
        self.assertFalse(first.closed)
        self.assertFalse(second.closed)
        resp.close()
        self.assertTrue(first.closed)
        self.assertTrue(second.closed)
Beispiel #19
0
    def process_response(self, request: HttpRequest,
                         response: StreamingHttpResponse) -> StreamingHttpResponse:
        """Replace the placeholder open-graph description in the rendered
        page with a cached plain-text summary extracted from its paragraphs.
        """
        @cache_with_key(open_graph_description_cache_key, timeout=3600*24)
        def get_content_description(content: bytes, request: HttpRequest) -> str:
            str_content = content.decode("utf-8")
            bs = BeautifulSoup(str_content, features='lxml')
            # Skip any admonition (warning) blocks, since they're
            # usually something about users needing to be an
            # organization administrator, and not useful for
            # describing the page.
            for tag in bs.find_all('div', class_="admonition"):
                tag.clear()

            # Skip code-sections, which just contains navigation instructions.
            for tag in bs.find_all('div', class_="code-section"):
                tag.clear()

            # Concatenate paragraph text until roughly 500 characters,
            # collapsing all whitespace runs to single spaces.
            text = ''
            for paragraph in bs.find_all('p'):
                # .text converts it from HTML to text
                text = text + paragraph.text + ' '
                if len(text) > 500:
                    return ' '.join(text.split())
            return ' '.join(text.split())

        def alter_content(content: bytes) -> bytes:
            first_paragraph_text = get_content_description(content, request)
            return content.replace(request.placeholder_open_graph_description.encode("utf-8"),
                                   first_paragraph_text.encode("utf-8"))

        # The unused wrap_streaming_content helper was dead code (the
        # streaming branch is excluded by the assert below) and was removed.
        if getattr(request, "placeholder_open_graph_description", None) is not None:
            assert not response.streaming
            response.content = alter_content(response.content)
        return response
Beispiel #20
0
def index(request):
    """Image-resize landing page.

    On POST with a valid upload form, resizes the uploaded image to the
    requested dimensions/format and streams it back as an attachment;
    otherwise (GET, invalid form, or resize failure) renders the index
    template with a random slogan and any error message.
    """

    error_message = ''

    if request.method == 'POST':

        form = UploadFileForm(request.POST, request.FILES)

        if form.is_valid():

            filedata = request.FILES['file']

            if filedata is not None:

                width = int(request.POST['width'])
                height = int(request.POST['height'])

                # Clamp out-of-range dimensions (normally prevented by the
                # HTML/Javascript validation on the client).
                if width <= 0 or width > 5000:
                    width = 100
                if height <= 0 or height > 5000:
                    height = 100

                image_format = request.POST['image_format']

                # Default to PNG when an unknown format is submitted.
                if image_format not in ("jpg", "png", "bmp"):
                    image_format = "png"

                # Pillow's save() calls the JPEG format "jpeg", not "jpg".
                if image_format == "jpg":
                    image_format = "jpeg"

                image = Image.open(filedata)

                size = width, height

                # Image.ANTIALIAS was an alias of LANCZOS and was removed in
                # Pillow 10; LANCZOS is the same filter under its current name.
                image = image.resize(size, Image.LANCZOS)

                # NamedTemporaryFile deletes the file as soon as it is closed.
                tempFile = tempfile.NamedTemporaryFile()

                try:
                    image.save(tempFile, image_format)
                    tempFile.seek(0)

                    wrapper = FileWrapper(tempFile)
                    response = StreamingHttpResponse(wrapper,
                                                     'image/%s' % image_format)
                    response[
                        'Content-Disposition'] = 'attachment; filename=resized_image.%s' % image_format

                    return response

                except AttributeError:
                    error_message = 'Error: unfortunately, we could not resize your image, please try again in a few seconds!'

    template = loader.get_template('resize/index.html')

    # Pick a random slogan to display on the page.
    index = random.randint(0, len(slogans) - 1)

    context = {
        'slogan': slogans[index],
        'error_message': error_message,
    }

    return HttpResponse(template.render(context, request))
Beispiel #21
0
    def post(self, request, *args, **kwargs):
        """Export this volume as an annotated-TEI Jekyll website, either
        pushed to a new GitHub repository or streamed back as a zip download,
        depending on the submitted export form."""
        vol = self.object = self.get_object()

        # don't do anything if user is not logged in
        # NOTE(review): is_anonymous became a property (not callable) in
        # Django 1.10 — this call form only works on older versions; confirm.
        if self.request.user.is_anonymous():
            response = render(request, self.template_name, self.get_context_data())
            response.status_code = 400  # bad request
            return response

        # get posted form data and use that to generate the export
        export_form = self.get_form()
        if export_form.is_valid():
            cleaned_data = export_form.cleaned_data

            # if github export is requested, make sure user has a
            # github account available to use for access
            if cleaned_data['github']:
                try:
                    github.GithubApi.github_account(self.request.user)
                except github.GithubAccountNotFound:
                    return self.render(request, error=self.github_account_msg)

                # check that oauth token has sufficient permission
                # to do needed export steps
                gh = github.GithubApi.connect_as_user(self.request.user)
                # note: repo would also work here, but currently asking for public_repo
                if 'public_repo' not in gh.oauth_scopes():
                    return self.render(request, error=self.github_scope_msg)
        else:
            return self.render(request)

        # determine which annotations should be loaded
        # NOTE(review): if the submitted value is neither 'user' nor a
        # 'group:...' choice, `annotations` is never bound and the
        # annotate.annotated_tei call below raises NameError — confirm the
        # form's choices make that impossible.
        if cleaned_data['annotations'] == 'user':
            # annotations *by* this user
            # (NOT all annotations the user can view)
            annotations = vol.annotations().filter(user=request.user)
        elif cleaned_data['annotations'].startswith('group:'):
            # all annotations visible to a group this user belongs to
            group_id = cleaned_data['annotations'][len('group:'):]
            # NOTE: object not found error should not occur here,
            # because only valid group ids should be valid choices
            group = AnnotationGroup.objects.get(pk=group_id)
            annotations = vol.annotations().visible_to_group(group)

        # generate annotated tei
        tei = annotate.annotated_tei(vol.generate_volume_tei(),
                                     annotations)

        # check form data to see if github repo is requested
        if cleaned_data['github']:
            try:
                repo_url, ghpages_url = export.website_gitrepo(request.user,
                    cleaned_data['github_repo'], vol, tei,
                    page_one=cleaned_data['page_one'])

                logger.info('Exported %s to GitHub repo %s for user %s',
                    vol.pid, repo_url, request.user.username)

                # NOTE: maybe use a separate template here?
                return self.render(request, repo_url=repo_url,
                    ghpages_url=ghpages_url, github_export=True)
            except export.GithubExportException as err:
                response = self.render(request, error='Export failed: %s' % err)
                response.status_code = 400  # maybe?
                return response
        else:
            # non github export: download zipfile
            try:
                webzipfile = export.website_zip(vol, tei,
                    page_one=cleaned_data['page_one'])
                logger.info('Exported %s as jekyll zipfile for user %s',
                    vol.pid, request.user.username)
                # Stream the zip in 8 KB chunks as an attachment.
                # NOTE(review): "jeyll" in the download filename looks like a
                # typo for "jekyll" — left as-is since it is runtime output.
                response = StreamingHttpResponse(FileWrapper(webzipfile, 8192),
                    content_type='application/zip')
                response['Content-Disposition'] = 'attachment; filename="%s_annotated_jeyll_site.zip"' % \
                    (vol.noid)
                response['Content-Length'] = os.path.getsize(webzipfile.name)
            except export.ExportException as err:
                # display error to user and redisplay the form
                response = self.render(request, error='Export failed. %s' % err)
                response.status_code = 500

            # set a cookie to indicate download is complete, that can be
            # used by javascript to hide a 'generating' indicator
            completion_cookie_name = request.POST.get('completion-cookie',
                '%s-web-export' % vol.noid)
            response.set_cookie(completion_cookie_name, 'complete', max_age=10)
            return response
Beispiel #22
0
def start(request):
    """Kick off scraping and stream its output back to the client."""
    response = StreamingHttpResponse(scrap())
    return response
Beispiel #23
0
# Sort options for the "list" endpoint: arg2 value -> (model attribute,
# reverse flag for sorted()).  Mirrors the previous if-chain exactly,
# including the inverted reverse flags for textual vs. numeric sorts.
_LIST_SORTS = {
    "players": ("players", True),
    "-players": ("players", False),
    "posted": ("posted", True),
    "-posted": ("posted", False),
    "rating": ("rating", True),
    "-rating": ("rating", False),
    "downloaded": ("downloaded", True),
    "-downloaded": ("downloaded", False),
    "title": ("title", False),
    "-title": ("title", True),
    "author_name": ("author", False),
    "-author_name": ("author", True),
}


def _cors_text_response(text):
    """Wrap *text* in a text/plain streaming response with wildcard CORS."""
    response = StreamingHttpResponse(text, content_type="text/plain")
    response['Access-Control-Allow-Origin'] = '*'
    return response


def _cors_json_response(payload):
    """Wrap *payload* (JSON-serializable) in a streaming response with CORS.

    NOTE(review): "application/javascript" is kept for backwards
    compatibility with existing API clients, although "application/json"
    would be the correct media type.
    """
    response = StreamingHttpResponse(
        json.dumps(payload, indent=4),
        content_type="application/javascript")
    response['Access-Control-Allow-Origin'] = '*'
    return response


def _serialized_response(request, map_objects, serializer, as_yaml,
                         raise_on_empty):
    """Serialize *map_objects* with *serializer* and build the response.

    ``as_yaml`` selects the YAML (text/plain) representation; otherwise a
    JSON array is produced.  When ``raise_on_empty`` is true an empty
    result raises Http404, matching the original per-endpoint behaviour
    (the "title" and "list" endpoints historically returned an empty
    document instead of a 404).
    """
    if as_yaml:
        text = "".join(
            serializer(request, item, "yaml") for item in map_objects)
        if raise_on_empty and text == "":
            raise Http404
        return _cors_text_response(text)
    payload = [serializer(request, item) for item in map_objects]
    if raise_on_empty and not payload:
        raise Http404
    return _cors_json_response(payload)


def mapAPI(request, arg, arg1="", arg2="", arg3="", arg4=""):
    """Public map API dispatcher.

    ``arg`` selects the endpoint: ``title`` / ``hash`` / ``id`` / ``url`` /
    ``minimap`` / ``list`` / ``sync`` / ``syncall`` / ``lastmap``; anything
    else is treated as a map hash and the matching ``.oramap`` package is
    served as a download.  ``arg1``-``arg4`` are positional URL parameters
    whose meaning depends on the endpoint (search term, output format,
    page number, ...).

    Fixes over the previous revision:
    - the sync/syncall endpoints declared the invalid MIME type
      "plain/text"; they now send "text/plain";
    - removed the dead ``if not mapObject.downloading`` re-check in the
      download branch (the queryset already filters ``downloading=True``);
    - the heavily duplicated YAML/JSON response building is factored into
      ``_serialized_response``; the sort if-chain into ``_LIST_SORTS``.
    """
    # --- detailed map info by title (substring match) -------------------
    if arg == "title":
        matches = Maps.objects.filter(title__icontains=arg1.lower())
        if not matches:
            raise Http404
        return _serialized_response(request, matches,
                                    serialize_basic_map_info,
                                    arg2 == "yaml", raise_on_empty=False)

    # --- detailed map info by content hash ------------------------------
    elif arg == "hash":
        matches = Maps.objects.filter(
            map_hash__in=arg1.split(',')).distinct('map_hash')
        if not matches:
            raise Http404
        return _serialized_response(request, matches,
                                    serialize_basic_map_info,
                                    arg2 == "yaml", raise_on_empty=True)

    # --- detailed map info by database ID -------------------------------
    elif arg == "id":
        matches = Maps.objects.filter(id__in=arg1.split(','))
        if not matches:
            raise Http404
        return _serialized_response(request, matches,
                                    serialize_basic_map_info,
                                    arg2 == "yaml", raise_on_empty=True)

    # --- download URL(s) of maps by hash --------------------------------
    elif arg == "url":
        matches = Maps.objects.filter(map_hash__in=arg1.split(','))
        if not matches:
            raise Http404
        return _serialized_response(request, matches,
                                    serialize_url_map_info,
                                    arg2 == "yaml", raise_on_empty=True)

    # --- base64-encoded minimap preview by hash -------------------------
    elif arg == "minimap":
        matches = Maps.objects.filter(map_hash__in=arg1.split(','))
        if not matches:
            raise Http404
        return _serialized_response(request, matches,
                                    serialize_minimap_map_info,
                                    arg2 == "yaml", raise_on_empty=True)

    # --- paged listing with optional sorting/filtering ------------------
    elif arg == "list":
        mod = arg1
        if mod == "":
            raise Http404
        if arg2 not in [
                "rating", "-rating", "players", "-players", "posted",
                "-posted", "downloaded", "-downloaded", "title", "-title",
                "author_name", "-author_name", "author", "uploader", ""
        ]:
            raise Http404
        try:
            matches = Maps.objects.filter(
                game_mod=mod.lower(),
                downloading=True,
                amount_reports__lt=settings.REPORTS_PENALTY_AMOUNT).distinct(
                    'map_hash')
            if arg2 in _LIST_SORTS:
                attr, descending = _LIST_SORTS[arg2]
                matches = sorted(matches,
                                 key=lambda m: getattr(m, attr),
                                 reverse=descending)
            elif arg2 == "author":
                # arg3 carries the author name here; "yaml" would clash
                # with the format selector, so it yields an empty page.
                if arg3 in ("", "yaml"):
                    matches = []
                else:
                    matches = matches.filter(author__iexact=arg3.lower())
                    if not matches:
                        matches = []
            elif arg2 == "uploader":
                if arg3 in ("", "yaml"):
                    matches = []
                else:
                    try:
                        uploader = User.objects.get(
                            username__iexact=arg3.lower())
                        matches = matches.filter(user_id=uploader.id)
                    except Exception:
                        matches = []
        except Exception:
            raise Http404
        # When no author/uploader filter is active, arg3 may hold the
        # page number instead.
        try:
            page = int(arg3)
        except (TypeError, ValueError):
            page = 1
        per_page = 24
        matches = matches[per_page * page - per_page:per_page * page]
        return _serialized_response(request, matches,
                                    serialize_basic_map_info,
                                    "yaml" in (arg3, arg4),
                                    raise_on_empty=False)

    # --- sync URL list of latest, report-free revisions -----------------
    elif arg == "sync":
        if arg1 == "":
            raise Http404
        try:
            candidates = Maps.objects.filter(
                game_mod=arg1.lower(),
                next_rev=0,
                downloading=True,
                amount_reports__lt=settings.REPORTS_PENALTY_AMOUNT).distinct(
                    "map_hash")
            visible = [
                item for item in candidates
                if len(Reports.objects.filter(ex_id=item.id,
                                              ex_name="maps")) <
                settings.REPORTS_PENALTY_AMOUNT
            ]
            visible.sort(key=lambda m: m.id)
            if not visible:
                raise Http404
        except Exception:
            raise Http404
        data = "".join(
            get_url(request, item.id) + "/sync" + "\n" for item in visible)
        # BUG FIX: previously sent the invalid MIME type "plain/text".
        return _cors_text_response(data)

    # --- sync URL list of every revision --------------------------------
    elif arg == "syncall":
        if arg1 == "":
            raise Http404
        all_revisions = sorted(
            Maps.objects.filter(game_mod=arg1.lower()).distinct("map_hash"),
            key=lambda m: m.id)
        if not all_revisions:
            raise Http404
        data = "".join(
            get_url(request, item.id) + "/sync" + "\n"
            for item in all_revisions)
        # BUG FIX: previously sent the invalid MIME type "plain/text".
        return _cors_text_response(data)

    # --- most recently added map ----------------------------------------
    elif arg == "lastmap":
        newest = Maps.objects.latest('id')
        return _serialized_response(request, [newest],
                                    serialize_basic_map_info,
                                    arg1 == "yaml", raise_on_empty=True)

    # --- default: serve the .oramap package for the given hash ----------
    else:
        try:
            map_object = Maps.objects.filter(map_hash=arg,
                                             downloading=True)[0]
        except Exception:
            raise Http404
        # Maps with too many abuse reports are hidden from download.
        if map_object.amount_reports >= settings.REPORTS_PENALTY_AMOUNT:
            raise Http404
        path = os.getcwd() + os.sep + __name__.split(
            '.')[0] + '/data/maps/' + str(map_object.id)
        try:
            map_dir = os.listdir(path)
        except OSError:
            raise Http404
        oramap = next(
            (name for name in map_dir if name.endswith(".oramap")), "")
        if oramap == "":
            raise Http404
        serve_path = path + os.sep + oramap
        # Download name carries the revision so clients can tell versions apart.
        download_name = "%s-%s.oramap" % (os.path.splitext(oramap)[0],
                                          map_object.revision)
        response = StreamingHttpResponse(open(serve_path, 'rb'),
                                         content_type='application/zip')
        response['Content-Disposition'] = \
            'attachment; filename = %s' % download_name
        response['Content-Length'] = os.path.getsize(serve_path)
        # NOTE(review): non-atomic read-modify-write of the download
        # counter; an F() expression would avoid lost updates.
        Maps.objects.filter(id=map_object.id).update(
            downloaded=map_object.downloaded + 1)
        return response
Beispiel #24
0
def export_csv_response(generator, name='export.csv'):
    """Stream rows from *generator* as a CSV download named *name*."""
    csv_response = StreamingHttpResponse(generator, content_type='text/csv')
    disposition = 'attachment; filename="%s"' % name
    csv_response['Content-Disposition'] = disposition
    return csv_response
Beispiel #25
0
def streaming1_csv_view(request):
    """Stream a one-million-row CSV without materialising it in memory."""
    rows = ("p{0},{0}\n".format(i) for i in range(1000000))
    response = StreamingHttpResponse(content_type="text/csv")
    response['Content-Disposition'] = "attachment;filename=large.csv"
    # Rows are generated lazily as the response body is iterated.
    response.streaming_content = rows
    return response
Beispiel #26
0
    def test_streaming_response(self):
        """Exercise the StreamingHttpResponse contract: byte-chunk
        iteration, single-pass consumption, lazy content replacement, and
        the APIs it deliberately lacks (``content``, ``write``, ``tell``)."""
        r = StreamingHttpResponse(iter(['hello', 'world']))

        # iterating over the response itself yields bytestring chunks.
        chunks = list(r)
        self.assertEqual(chunks, [b'hello', b'world'])
        for chunk in chunks:
            self.assertIsInstance(chunk, bytes)

        # and the response can only be iterated once.
        self.assertEqual(list(r), [])

        # even when a sequence that can be iterated many times, like a list,
        # is given as content.
        r = StreamingHttpResponse(['abc', 'def'])
        self.assertEqual(list(r), [b'abc', b'def'])
        self.assertEqual(list(r), [])

        # iterating over strings still yields bytestring chunks (UTF-8).
        r.streaming_content = iter(['hello', 'café'])
        chunks = list(r)
        # '\xc3\xa9' == unichr(233).encode()
        self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
        for chunk in chunks:
            self.assertIsInstance(chunk, bytes)

        # streaming responses don't have a `content` attribute.
        self.assertFalse(hasattr(r, 'content'))

        # and you can't accidentally assign to a `content` attribute.
        with self.assertRaises(AttributeError):
            r.content = 'xyz'

        # but they do have a `streaming_content` attribute.
        self.assertTrue(hasattr(r, 'streaming_content'))

        # that exists so we can check if a response is streaming, and wrap or
        # replace the content iterator.
        r.streaming_content = iter(['abc', 'def'])
        r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
        self.assertEqual(list(r), [b'ABC', b'DEF'])

        # coercing a streaming response to bytes doesn't return a complete HTTP
        # message like a regular response does. it only gives us the headers.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(bytes(r), b'Content-Type: text/html; charset=utf-8')

        # and this won't consume its content.
        self.assertEqual(list(r), [b'hello', b'world'])

        # additional content cannot be written to the response.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        with self.assertRaises(Exception):
            r.write('!')

        # and we can't tell the current position.
        with self.assertRaises(Exception):
            r.tell()

        # getvalue() concatenates the remaining chunks into one bytestring.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(r.getvalue(), b'helloworld')
Beispiel #27
0
    def post(cls, request, *args, **kwargs):
        """AJAX dispatcher for the SQL report editor.

        The action is selected by which key is present in ``request.POST``:
        ``format`` pretty-prints the SQL, ``save`` creates or updates a
        report, ``delete`` removes one, and ``test`` streams the query
        results back as JSON.  Only superusers may reach any action.
        """
        # Allow only post from superusers
        if not request.user.is_superuser:
            return HttpResponseNotAllowed(
                ["post"],
                content="Only a superuser can execute SQL statements")
        # NOTE(review): request.is_ajax() is deprecated (removed in modern
        # Django) -- confirm the project pins a compatible release.
        if request.method != "POST" or not request.is_ajax():
            return HttpResponseForbidden("<h1>%s</h1>" %
                                         _("Permission denied"))

        if "format" in request.POST:
            # Reformat only; nothing is executed or persisted here.
            formatted = sqlparse.format(
                request.POST.get("sql", ""),
                keyword_case="lower",
                identifier_case="lower",
                strip_comments=False,
                reindent=True,
                wrap_after=50,
                indent_tabs=False,
                indent_width=2,
            )
            return JsonResponse({"formatted": formatted})

        elif "save" in request.POST:
            # Update an existing report (only by its owner) or create a new one.
            if "id" in request.POST:
                m = SQLReport.objects.using(
                    request.database).get(pk=request.POST["id"])
                if m.user.id != request.user.id:
                    return HttpResponseForbidden(
                        "You're not the owner of this report")
                f = SQLReportForm(request.POST, instance=m)
            else:
                f = SQLReportForm(request.POST)
            if f.is_valid():
                m = f.save(commit=False)
                m.user = request.user
                m.save()
                return JsonResponse({"id": m.id})
            else:
                return HttpResponseServerError("Error saving report")

        elif "delete" in request.POST:
            # NOTE(review): a missing "id" key raises KeyError (-> 500);
            # the user filter ensures only the owner's report is deleted.
            pk = request.POST["id"]
            SQLReport.objects.using(request.database).filter(
                pk=pk, user=request.user).delete()
            messages.add_message(
                request,
                messages.INFO,
                _('The %(name)s "%(obj)s" was deleted successfully.') % {
                    "name": _("my report"),
                    "obj": pk
                },
            )
            return HttpResponse("ok")

        elif "test" in request.POST:
            # Execute the posted SQL and stream the result rows as JSON.
            return StreamingHttpResponse(
                content_type="application/json; charset=%s" %
                settings.DEFAULT_CHARSET,
                streaming_content=cls._generate_json_data(
                    database=request.database, sql=request.POST["sql"]),
            )

        else:
            return HttpResponseNotAllowed("Unknown post request")
 def serve(self, rendition):
     """Stream the rendition's underlying image file to the client,
     sniffing the MIME subtype from the file contents."""
     # Open and serve the file
     rendition.file.open('rb')
     # NOTE(review): imghdr is deprecated (removed in Python 3.13), and
     # imghdr.what() returns None for unrecognised data, which would make
     # the 'image/' + ... concatenation below raise TypeError -- confirm
     # renditions are always valid images.
     image_format = imghdr.what(rendition.file)
     return StreamingHttpResponse(FileWrapper(rendition.file),
                                  content_type='image/' + image_format)
Beispiel #29
0
def streaming_view(request):
    """Stream 30 chunks from the incremental writer as the response body."""
    chunks = streaming_writer(30)
    return StreamingHttpResponse(chunks)
def serve(request, document_id, document_filename):
    """Serve a document, picking the cheapest delivery strategy available:
    a redirect to the storage backend's URL, a sendfile-backed response for
    local files, or (as a last resort) streaming the bytes through Python.

    The filename in the URL must match the stored filename so a guessable
    numeric ID cannot be used to retrieve a document under a wrong name.
    """
    Document = get_document_model()
    doc = get_object_or_404(Document, id=document_id)

    # We want to ensure that the document filename provided in the URL matches the one associated with the considered
    # document_id. If not we can't be sure that the document the user wants to access is the one corresponding to the
    # <document_id, document_filename> pair.
    if doc.filename != document_filename:
        raise Http404('This document does not match the given filename.')

    # Hooks may short-circuit serving (e.g. permission checks) by
    # returning their own HttpResponse.
    for fn in hooks.get_hooks('before_serve_document'):
        result = fn(doc, request)
        if isinstance(result, HttpResponse):
            return result

    # Send document_served signal
    document_served.send(sender=Document, instance=doc, request=request)

    try:
        local_path = doc.file.path
    except NotImplementedError:
        local_path = None

    try:
        direct_url = doc.file.url
    except NotImplementedError:
        direct_url = None

    serve_method = getattr(settings, 'WAGTAILDOCS_SERVE_METHOD', None)

    # If no serve method has been specified, select an appropriate default for the storage backend:
    # redirect for remote storages (i.e. ones that provide a url but not a local path) and
    # serve_view for all other cases
    if serve_method is None:
        if direct_url and not local_path:
            serve_method = 'redirect'
        else:
            serve_method = 'serve_view'

    if serve_method in ('redirect', 'direct') and direct_url:
        # Serve the file by redirecting to the URL provided by the underlying storage;
        # this saves the cost of delivering the file via Python.
        # For serve_method == 'direct', this view should not normally be reached
        # (the document URL as used in links should point directly to the storage URL instead)
        # but we handle it as a redirect to provide sensible fallback /
        # backwards compatibility behaviour.
        return redirect(direct_url)

    if local_path:

        # Use wagtail.utils.sendfile to serve the file;
        # this provides support for mimetypes, if-modified-since and django-sendfile backends

        sendfile_opts = {
            'attachment': (doc.content_disposition != 'inline'),
            'attachment_filename': doc.filename,
            'mimetype': doc.content_type,
        }
        if not hasattr(settings, 'SENDFILE_BACKEND'):
            # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
            sendfile_opts['backend'] = sendfile_streaming_backend.sendfile

        return sendfile(request, local_path, **sendfile_opts)

    else:

        # We are using a storage backend which does not expose filesystem paths
        # (e.g. storages.backends.s3boto.S3BotoStorage) AND the developer has not allowed
        # redirecting to the file url directly.
        # Fall back on pre-sendfile behaviour of reading the file content and serving it
        # as a StreamingHttpResponse

        wrapper = FileWrapper(doc.file)
        # Second positional argument is content_type.
        response = StreamingHttpResponse(wrapper, doc.content_type)

        # set filename and filename* to handle non-ascii characters in filename
        # see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
        response['Content-Disposition'] = doc.content_disposition

        # FIXME: storage backends are not guaranteed to implement 'size'
        response['Content-Length'] = doc.file.size

        return response
Beispiel #31
0
    def form_valid(self, form):
        """Stream a CSV changelog of all revisions in the selected
        date range.

        Rows are produced lazily by ``yield_revisions`` and formatted by a
        ``csv.writer`` wrapped around an ``Echo`` pseudo-buffer whose
        ``write`` returns the formatted line, so the export is streamed
        without being buffered in memory.
        """
        from_date = form.cleaned_data.get('from_date')
        to_date = form.cleaned_data.get('to_date')

        # Build date-range filters only for the bounds actually supplied.
        filters = {}

        if from_date:
            filters['date_created__gte'] = from_date

        if to_date:
            filters['date_created__lte'] = to_date

        # NOTE(review): the excluded username appears redacted ('******')
        # in this copy of the source -- confirm the intended value.
        revisions = Revision.objects.filter(**filters).exclude(
            user__username='******')

        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer)

        def yield_revisions():
            # One row per (revision, version, field) triple, preceded by
            # a header row.
            header = [
                'revision_id',
                'entity_id',
                'entity_type',
                'user',
                'modification_date',
                'entity_attribute_name',
                'attribute',
                'value',
            ]

            yield header

            for revision in revisions:
                for version in revision.version_set.all():
                    # Resolve the entity the versioned object refers to;
                    # skip versions of types we don't know how to report.
                    if hasattr(version.object, 'object_ref'):
                        reference_object = version.object.object_ref
                    elif isinstance(version.object, Source) or isinstance(
                            version.object, AccessPoint):
                        reference_object = version.object
                    else:
                        continue

                    # Prefer the stable uuid; fall back to the numeric pk.
                    try:
                        entity_id = reference_object.uuid
                    except AttributeError:
                        entity_id = reference_object.id

                    entity_type = reference_object._meta.object_name

                    field_name = version.object._meta.object_name.replace(
                        entity_type, '')

                    for key, value in version.field_dict.items():

                        # Primary/foreign keys carry no changelog value.
                        if key == 'id' or key.endswith('_id'):
                            continue

                        if isinstance(value, list):
                            value = ';'.join(value)

                        row = [
                            revision.id,
                            entity_id,
                            entity_type,
                            revision.user.username,
                            revision.date_created.isoformat(),
                            field_name,
                            key,
                            value,
                        ]

                        yield row

        response = StreamingHttpResponse(
            (writer.writerow(row) for row in yield_revisions()),
            content_type="text/csv")
        response[
            'Content-Disposition'] = 'attachment; filename="changelog-{}.csv"'.format(
                timezone.now().isoformat())

        return response
Beispiel #32
0
def download(modeladmin, request, selected):
    """Admin action: stream a small fixed text payload back to the client."""
    payload = StringIO('This is the content of the file')
    return StreamingHttpResponse(FileWrapper(payload))
Beispiel #33
0
def livefe(request):
    """Stream live camera frames as an MJPEG (multipart/x-mixed-replace)
    response.

    Propagates camera/streaming failures instead of swallowing them: the
    previous ``except: pass`` made the view return ``None``, which Django
    rejects with a confusing "didn't return an HttpResponse" error that
    hid the real cause.
    """
    import logging
    try:
        return StreamingHttpResponse(
            gen(VideoCamera()),
            content_type="multipart/x-mixed-replace;boundary=frame")
    except Exception:
        # Log the underlying failure, then let Django's standard error
        # handling produce the 500 response.
        logging.getLogger(__name__).exception("livefe: camera stream failed")
        raise
Beispiel #34
0
    def do_request(self, request, url, method, workspace):
        """Proxy the incoming *request* to *url* and translate the upstream
        reply into a Django StreamingHttpResponse.

        Headers are rebuilt from ``request.META`` (session and CSRF cookies
        are stripped), Via / X-Forwarded-* headers are appended, and the
        registered proxy processors may rewrite both the outgoing request
        and the incoming response.
        """
        url = iri_to_uri(url)

        # Everything the proxy processors and the outgoing request need,
        # gathered in one mutable dict.
        request_data = {
            "method": method,
            "url": url,
            "data": None,
            "headers": {},
            "cookies": SimpleCookie(),
            "user": request.user,
            "workspace": workspace,
            "original-request": request,
        }

        # Request creation
        proto, host, cgi, param, query = urlparse(url)[:5]

        # Extract headers from META
        if 'HTTP_TRANSFER_ENCODING' in request.META:
            return build_error_response(request, 500, "Wirecloud doesn't support requests using Transfer-Encodings")

        for header in request.META.items():
            header_name = header[0].lower()
            if header_name == 'content_type' and header[1]:
                request_data['headers']["content-type"] = header[1]

            elif header_name == 'content_length' and header[1]:
                # Only take into account request body if the request has a
                # Content-Length header (we don't support chunked requests)
                request_data['data'] = request
                # The requests library reads .len from file-like bodies.
                request_data['headers']['content-length'] = header[1]
                request_data['data'].len = int(header[1])

            elif header_name == 'cookie' or header_name == 'http_cookie':

                cookie_parser = SimpleCookie(str(header[1]))

                # Never forward Wirecloud's own session/CSRF cookies upstream.
                # NOTE(review): this del raises KeyError if the session cookie
                # is absent (the CSRF one below is guarded) -- confirm the
                # session cookie is always present when a Cookie header is.
                del cookie_parser[str(settings.SESSION_COOKIE_NAME)]

                if str(settings.CSRF_COOKIE_NAME) in cookie_parser:
                    del cookie_parser[str(settings.CSRF_COOKIE_NAME)]

                request_data['cookies'].update(cookie_parser)

            elif self.http_headerRE.match(header_name) and not header_name in self.blacklisted_http_headers:

                # META names look like HTTP_X_CUSTOM; convert back to x-custom.
                fixed_name = header_name.replace("http_", "", 1).replace('_', '-')
                request_data['headers'][fixed_name] = header[1]

        # Build the Via header
        protocolVersion = self.protocolRE.match(request.META['SERVER_PROTOCOL'])
        if protocolVersion is not None:
            protocolVersion = protocolVersion.group(1)
        else:
            protocolVersion = '1.1'

        via_header = "%s %s (Wirecloud-python-Proxy/1.1)" % (protocolVersion, get_current_domain(request))
        if 'via' in request_data['headers']:
            request_data['headers']['via'] += ', ' + via_header
        else:
            request_data['headers']['via'] = via_header

        # XFF headers
        if 'x-forwarded-for' in request_data['headers']:
            request_data['headers']['x-forwarded-for'] += ', ' + request.META['REMOTE_ADDR']
        else:
            request_data['headers']['x-forwarded-for'] = request.META['REMOTE_ADDR']

        request_data['headers']['x-forwarded-host'] = host
        if 'x-forwarded-server' in request_data['headers']:
            del request_data['headers']['x-forwarded-server']

        # Pass proxy processors to the new request
        try:
            for processor in get_request_proxy_processors():
                processor.process_request(request_data)
        except ValidationError as e:
            return e.get_response(request)

        # Cookies
        # NOTE(review): this re-serialises from the last cookie_parser seen in
        # the loop above; it is only evaluated when request_data['cookies'] is
        # non-empty, so cookie_parser is guaranteed to be bound here.
        cookie_header_content = ', '.join([cookie_parser[key].OutputString() for key in request_data['cookies']])
        if cookie_header_content != '':
            request_data['headers']['Cookie'] = cookie_header_content

        # Open the request
        try:
            res = requests.request(request_data['method'], request_data['url'], headers=request_data['headers'], data=request_data['data'], stream=True, verify=getattr(settings, 'WIRECLOUD_HTTPS_VERIFY', True))
        except requests.exceptions.Timeout as e:
            return build_error_response(request, 504, _('Gateway Timeout'), details=six.text_type(e))
        except requests.exceptions.SSLError as e:
            return build_error_response(request, 502, _('SSL Error'), details=six.text_type(e))
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.TooManyRedirects) as e:
            return build_error_response(request, 504, _('Connection Error'), details=six.text_type(e))

        # Build a Django response, forwarding the upstream body unmodified.
        response = StreamingHttpResponse(res.raw.stream(4096, decode_content=False), status=res.status_code)
        # NOTE(review): `in` on a response tests its *headers*, so this
        # condition looks like it can never be true -- confirm intent.
        if 'reason_phrase' in response:  # pragma: no cover
            # Currently only django 1.6+ supports custom reason phrases
            response.reason_phrase = res.reason_phrase

        # Add all the headers received from the response
        for header in res.headers:

            header_lower = header.lower()
            if header_lower == 'set-cookie':

                for cookie in res.cookies:
                    response.set_cookie(cookie.name, value=cookie.value, expires=cookie.expires, path=cookie.path)

            elif header_lower == 'via':

                via_header = via_header + ', ' + res.headers[header]

            elif is_valid_response_header(header_lower):
                response[header] = res.headers[header]

        # Pass proxy processors to the response
        for processor in get_response_proxy_processors():
            response = processor.process_response(request_data, response)

        response['Via'] = via_header

        return response
Beispiel #35
0
def detect(request):
    """Stream the frames produced by the darkflow detector as MJPEG."""
    frame_source = run_darkflow()
    mime = "multipart/x-mixed-replace;boundary=frame"
    return StreamingHttpResponse(frame_source, content_type=mime)
Beispiel #36
0
def streamhr(request):
    """Serve the camera feed for ``videoname`` as a multipart MJPEG stream."""
    camera = VideoCamera(videoname)
    mime = 'multipart/x-mixed-replace; boundary=frame'
    return StreamingHttpResponse(gen(camera), content_type=mime)
Beispiel #37
0
    def get(self, request, tenantName, *args, **kwargs):
        """
        下载应用包
        ---
        parameters:
            - name: tenantName
              description: 团队名称
              required: true
              type: string
              paramType: path
            - name: format
              description: 导出类型 rainbond-app | docker-compose
              required: true
              type: string
              paramType: form
            - name: app_id
              description: rainbond app id
              required: true
              type: string
              paramType: query

        """
        # NOTE: the docstring above is machine-read swagger YAML; its field
        # values are deliberately left untouched.
        try:
            app_id = request.GET.get("app_id", None)
            export_format = request.GET.get("format", None)
            # Both query parameters are mandatory.
            if not app_id:
                return Response(general_message(400, "app id is null",
                                                "请指明需要下载的应用"),
                                status=400)
            if not export_format or export_format not in (
                    "rainbond-app",
                    "docker-compose",
            ):
                return Response(general_message(400,
                                                "export format is illegal",
                                                "请指明下载的格式"),
                                status=400)

            code, app = market_app_service.get_rain_bond_app_by_pk(app_id)
            if not app:
                return Response(general_message(404, "not found", "云市应用不存在"),
                                status=404)

            # A download is only possible for a finished, successful export.
            export_record = export_service.get_export_record(
                export_format, app)
            if not export_record:
                return Response(general_message(400, "no export records",
                                                "该应用无导出记录,无法下载"),
                                status=400)
            if export_record.status != "success":
                if export_record.status == "failed":
                    return Response(general_message(400, "export failed",
                                                    "应用导出失败,请重试"),
                                    status=400)
                if export_record.status == "exporting":
                    return Response(general_message(400, "exporting",
                                                    "应用正在导出中,请稍后重试"),
                                    status=400)

            req, file_name = export_service.get_file_down_req(
                export_format, tenantName, app)

            # Stream the package so large exports are not buffered in memory.
            response = StreamingHttpResponse(self.file_iterator(req))
            response['Content-Type'] = 'application/octet-stream'
            response[
                'Content-Disposition'] = 'attachment;filename="{0}"'.format(
                    file_name)
            return response
        except Exception as e:
            logger.exception(e)
            # BUG FIX: `e.message` was removed in Python 3 and raised
            # AttributeError here; fall back to str(e) when absent.
            result = error_message(getattr(e, "message", str(e)))
            return Response(result, status=result["code"])
Beispiel #38
0
 def test_no_etag_streaming_response(self):
     """ConditionalGetMiddleware must not attach an ETag to a streaming response."""
     streaming = StreamingHttpResponse(['content'])
     processed = ConditionalGetMiddleware().process_response(self.req, streaming)
     self.assertFalse(processed.has_header('ETag'))
Beispiel #39
0
def gh_csv(request, pid):
    """Stream all interview results for one person as a tab-separated export.

    Builds one row per answered question, grouped by assistant, interview
    and questionnaire, then streams the rows through a csv writer
    (excel-tab dialect) in a file named after the person's file code.
    """
    personne = Personne.objects.get(pk=pid)
    csv_data = []
    # Header row: province and file code identify the export.
    debut = []
    debut.append('Province & File code')
    debut.append(personne.province.reponse_en)
    debut.append(personne.personne_code)
    csv_data.append(debut)
    # Questionnaire 1 is excluded via id__gt=1; 300 is skipped explicitly.
    questionnaires = Questionnaire.objects.filter(id__gt=1).exclude(id=300)
    assistants = User.objects.all()
    entrevues = Interview.objects.all()
    for assistant in assistants:
        for entrevue in entrevues:
            # Only emit sections for assistant/interview pairs that have data.
            if Resultat.objects.filter(personne__id=pid,
                                       assistant_id=assistant.id,
                                       interview_id=entrevue.id).exists():
                for questionnaire in questionnaires:
                    ligne2 = []
                    questions = Question.objects.filter(
                        questionnaire_id=questionnaire.id).order_by(
                            'questionno')
                    ligne2.append(assistant.username)
                    ligne2.append(questionnaire.nom_en)
                    ligne2.append(entrevue.reponse_en)
                    csv_data.append(ligne2)
                    if questionnaire.id != 4 and questionnaire.id != 3:
                        # Simple questionnaires: one Resultat per question.
                        for question in questions:
                            ligne = []
                            donnee = Resultat.objects.filter(
                                personne__id=pid,
                                question__id=question.id,
                                assistant_id=assistant.id,
                                interview_id=entrevue.id)
                            if donnee:
                                ligne.append(question.varname)
                                ligne.append(question.questionen)
                                reponse = fait_reponsegh(
                                    donnee[0].reponse_texte, question,
                                    personne.province)
                                ligne.append(reponse)
                            if ligne != []:
                                csv_data.append(ligne)
                    else:
                        # Repeating questionnaires (ids 3 and 4): a card
                        # ("fiche") may occur several times per interview.
                        donnees = Resultatrepet.objects.order_by().filter(
                            personne__id=pid,
                            assistant__id=assistant.id,
                            interview_id=entrevue.id,
                            questionnaire__id=questionnaire.id).values_list(
                                'fiche', flat=True).distinct()
                        compte = donnees.count()
                        ligne2 = []
                        ligne2.append(
                            str(compte) + ' different entries for ' +
                            questionnaire.nom_en)
                        csv_data.append(ligne2)
                        for i in donnees:
                            ligne2 = []
                            ligne2.append(questionnaire.nom_en +
                                          ' card number ' + str(i))
                            csv_data.append(ligne2)
                            for question in questions:
                                try:
                                    donnee = Resultatrepet.objects.get(
                                        personne_id=pid,
                                        question_id=question.id,
                                        interview_id=entrevue.id,
                                        assistant_id=assistant.id,
                                        fiche=i)
                                except Resultatrepet.DoesNotExist:
                                    donnee = None
                                # BUG FIX: the append used to sit outside this
                                # guard, so a missing answer re-appended the
                                # previous row (or raised NameError on the
                                # first unanswered question).
                                if donnee:
                                    ligne = []
                                    ligne.append(question.varname)
                                    ligne.append(question.questionen)
                                    ligne.append(donnee.reponsetexte)
                                    csv_data.append(ligne)

    # Echo exposes a write() that returns the row unchanged, so the rows
    # can be streamed without buffering the whole file in memory.
    pseudo_buffer = Echo()
    writer = csv.writer(pseudo_buffer, dialect="excel-tab")
    response = StreamingHttpResponse(
        (writer.writerow(row) for row in csv_data), content_type="text/csv")
    response['Content-Disposition'] = 'attachment; filename="' + str(
        personne.personne_code) + '.txt"'
    return response
Beispiel #40
0
def download_from_url(request, file_url):
    """Proxy *file_url* back to the client as an attachment download.

    Streams the remote body in 8 KiB chunks; iterating the Response object
    directly (as the previous version did) yields 128-byte chunks, which is
    needlessly chatty for large files.
    """
    r = requests.get(file_url, stream=True)
    resp = StreamingHttpResponse(
        streaming_content=r.iter_content(chunk_size=8192))
    file_name = file_url.split("/")[-1]
    resp['Content-Disposition'] = 'attachment;filename="'+file_name+'"'
    return resp
Beispiel #41
0
def serve(request, path, document_root=None, show_indexes=False):
    """
    Serve static files below a given point in the directory structure.

    To use, put a URL pattern such as::

        (r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root': '/path/to/my/files/'})

    in your URLconf. You must provide the ``document_root`` param. You may
    also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
    of the directory.  This index view will use the template hardcoded below,
    but if you'd like to override it, you can create a template called
    ``static/directory_index.html``.

    Honours ``If-Modified-Since`` (304) and single-range ``Range`` requests
    (206/416); multipart byteranges are deliberately not supported.
    """
    # Normalise and sanitise the requested path: strip empty components,
    # drive letters, and '.'/'..' segments so the result cannot escape
    # document_root.
    path = posixpath.normpath(unquote(path))
    path = path.lstrip('/')
    newpath = ''
    for part in path.split('/'):
        if not part:
            # Strip empty path components.
            continue
        drive, part = os.path.splitdrive(part)
        head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        newpath = os.path.join(newpath, part).replace('\\', '/')
    if newpath and path != newpath:
        # Redirect to the canonical form of the URL.
        return HttpResponseRedirect(newpath)
    fullpath = os.path.join(document_root, newpath)
    if os.path.isdir(fullpath):
        if show_indexes:
            return directory_index(newpath, fullpath)
        raise Http404(_("Directory indexes are not allowed here."))
    if not os.path.exists(fullpath):
        raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    content_type, encoding = mimetypes.guess_type(fullpath)
    content_type = content_type or 'application/octet-stream'
    # RangedFileReader wraps the file handle so a byte sub-range can be
    # streamed; presumably the response machinery closes the handle when
    # the stream is exhausted — TODO confirm in RangedFileReader.
    ranged_file = RangedFileReader(open(fullpath, 'rb'))
    response = StreamingHttpResponse(ranged_file,
                                     content_type=content_type)
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if stat.S_ISREG(statobj.st_mode):
        size = statobj.st_size
        response["Content-Length"] = size
        response["Accept-Ranges"] = "bytes"
        # Respect the Range header.
        if "HTTP_RANGE" in request.META:
            try:
                ranges = parse_range_header(request.META['HTTP_RANGE'], size)
            except ValueError:
                ranges = None
            # only handle syntactically valid headers, that are simple (no
            # multipart byteranges)
            if ranges is not None and len(ranges) == 1:
                start, stop = ranges[0]
                if stop > size:
                    # requested range not satisfiable
                    return HttpResponse(status=416)
                ranged_file.start = start
                ranged_file.stop = stop
                # Content-Range uses an inclusive end byte, hence stop - 1.
                response["Content-Range"] = "bytes %d-%d/%d" % (start, stop - 1, size)
                response["Content-Length"] = stop - start
                response.status_code = 206
    if encoding:
        response["Content-Encoding"] = encoding
    return response
Beispiel #42
0
 def test_content_length_header_not_added(self):
     """ConditionalGetMiddleware must leave streaming responses length-less."""
     streaming = StreamingHttpResponse('content')
     self.assertNotIn('Content-Length', streaming)
     processed = ConditionalGetMiddleware().process_response(self.req, streaming)
     self.assertNotIn('Content-Length', processed)
Beispiel #43
0
def dataset_detail(request, slug, tablename=''):
    """Render a dataset table with search/filter/ordering support.

    When ``?format=csv`` is requested and the filtered row count is within
    ``max_export_rows``, the data is streamed as a CSV attachment instead
    of rendered.  An invalid ``page`` value yields a 400 response.
    """
    dataset = get_object_or_404(Dataset, slug=slug)
    if not tablename:
        # No table in the URL: redirect to the dataset's default table.
        tablename = dataset.get_default_table().name
        return redirect(
            reverse('core:dataset-table-detail',
                    kwargs={
                        'slug': slug,
                        'tablename': tablename
                    }))

    table = dataset.get_table(tablename)
    version = dataset.version_set.order_by('-order').first()
    fields = table.fields
    all_data = table.get_model().objects
    querystring = request.GET.copy()
    page_number = querystring.pop('page', ['1'])[0].strip() or '1'
    search_query = request.GET.get('search')
    order_by = querystring.pop('order-by', [''])
    order_by = [
        field.strip().lower() for field in order_by[0].split(',')
        if field.strip()
    ]

    if search_query:
        all_data = all_data.filter(search_data=SearchQuery(search_query))
    if querystring:
        # Drop empty filter values before applying the remaining ones.
        keys = list(querystring.keys())
        for key in keys:
            if not querystring[key]:
                del querystring[key]
        all_data = all_data.apply_filters(querystring)

    all_data = all_data.apply_ordering(order_by)
    if (querystring.get('format', '') == 'csv'
            and 0 < all_data.count() <= max_export_rows):
        filename = '{}-{}.csv'.format(slug, uuid.uuid4().hex)
        # Echo's write() returns the row, so rows stream without buffering.
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, dialect=csv.excel)
        csv_rows = queryset_to_csv(all_data, fields)
        response = StreamingHttpResponse(
            (writer.writerow(row) for row in csv_rows),
            content_type='text/csv;charset=UTF-8',
        )
        response['Content-Disposition'] = (
            'attachment; filename="{}"'.format(filename))
        response.encoding = 'UTF-8'
        return response

    paginator = Paginator(all_data, 20)
    try:
        page = int(page_number)
    except ValueError:
        # BUG FIX: this used to `raise HttpResponseBadRequest`, which is a
        # response *class*, not an exception, and crashed with TypeError.
        return HttpResponseBadRequest()
    data = paginator.get_page(page)

    if order_by:
        querystring['order-by'] = ','.join(order_by)
    if search_query:
        querystring['search'] = search_query
    context = {
        'data': data,
        'dataset': dataset,
        'fields': fields,
        'max_export_rows': max_export_rows,
        'query_dict': querystring,
        'querystring': querystring.urlencode(),
        'search_query': search_query,
        'slug': slug,
        # 'table' appeared twice in the original dict literal; kept once.
        'table': table,
        'total_count': all_data.count(),
        'version': version,
    }
    return render(request, 'dataset-detail.html', context)
Beispiel #44
0
 def test_no_etag_streaming_response(self):
     """CommonMiddleware must not add an ETag header to streaming responses."""
     fake_request = HttpRequest()
     streaming = StreamingHttpResponse(['content'])
     processed = CommonMiddleware().process_response(fake_request, streaming)
     self.assertFalse(processed.has_header('ETag'))
Beispiel #45
0
def stream_video(request):
    """Return the generated frames as a live multipart MJPEG stream."""
    frames = stream_response_generator()
    mime = "multipart/x-mixed-replace;boundary=frame"
    return StreamingHttpResponse(frames, content_type=mime)
Beispiel #46
0
 def terminal_output(self, request, pk):
     """Stream the recorded asciinema terminal log for this action.

     Raises Http404 when the recording is absent or empty.
     """
     action: Action = self.get_object()
     path: str = action.get_terminal_path()
     if not os.path.lexists(path) or not os.path.getsize(path):
         raise Http404
     return StreamingHttpResponse(asciinema_cat(path))
def multiple_download(request):
    """Send the pre-computed classifier results file as a CSV attachment."""
    # Django closes file-like streaming content when the response finishes.
    result_file = open('classifier/result/result.csv', 'rb')
    response = StreamingHttpResponse(result_file, content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="result.csv"'
    return response
Beispiel #48
0
def Robots(request):
    """Serve robots.txt.

    Two fixes over the previous version: the directives are newline-
    separated (robots.txt is parsed line by line, so the old single-line
    body was ignored by crawlers), and the body is wrapped in a list —
    streaming a bare str makes Django emit one chunk per character.
    The media type is text/plain as the robots standard requires.
    """
    html = ('User-agent: *\n'
            'Disallow: /admin/\n'
            'Disallow: /coordinate/\n'
            'Disallow: /user/\n'
            'Sitemap: http://www.trovabiomassa.com/sitemap.xml')
    return StreamingHttpResponse([html], content_type="text/plain")
Beispiel #49
0
    def test_streaming_response(self):
        """Exercise the StreamingHttpResponse contract: byte chunks,
        single-pass iteration, no ``content`` attribute, replaceable
        ``streaming_content``, header-only bytes coercion, and no
        write()/tell() support.  The assertions below depend on exact
        statement order — the response is consumed as it is iterated."""
        r = StreamingHttpResponse(iter(["hello", "world"]))

        # iterating over the response itself yields bytestring chunks.
        chunks = list(r)
        self.assertEqual(chunks, [b"hello", b"world"])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)

        # and the response can only be iterated once.
        self.assertEqual(list(r), [])

        # even when a sequence that can be iterated many times, like a list,
        # is given as content.
        r = StreamingHttpResponse(["abc", "def"])
        self.assertEqual(list(r), [b"abc", b"def"])
        self.assertEqual(list(r), [])

        # streaming responses don't have a `content` attribute.
        self.assertFalse(hasattr(r, "content"))

        # and you can't accidentally assign to a `content` attribute.
        with self.assertRaises(AttributeError):
            r.content = "xyz"

        # but they do have a `streaming_content` attribute.
        self.assertTrue(hasattr(r, "streaming_content"))

        # that exists so we can check if a response is streaming, and wrap or
        # replace the content iterator.
        r.streaming_content = iter(["abc", "def"])
        r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
        self.assertEqual(list(r), [b"ABC", b"DEF"])

        # coercing a streaming response to bytes doesn't return a complete HTTP
        # message like a regular response does. it only gives us the headers.
        r = StreamingHttpResponse(iter(["hello", "world"]))
        self.assertEqual(six.binary_type(r), b"Content-Type: text/html; charset=utf-8")

        # and this won't consume its content.
        self.assertEqual(list(r), [b"hello", b"world"])

        # additional content cannot be written to the response.
        r = StreamingHttpResponse(iter(["hello", "world"]))
        with self.assertRaises(Exception):
            r.write("!")

        # and we can't tell the current position.
        with self.assertRaises(Exception):
            r.tell()
Beispiel #50
0
def Sitemap(request):
    """Stream sitemap.xml: static pages, product/region/province search
    URLs, and one URL per fuel reseller offer.

    Fixes over the previous version: removed the no-op ``''.join(html)``
    and ``e.encode('utf8')`` statements (their results were discarded),
    renamed the ``set`` local that shadowed the builtin, and replaced the
    ``list.index()``-based in-place rewrite (which misbehaves when two
    components are equal) with a comprehension.
    """
    html = [
        '''<?xml version="1.0" encoding="UTF-8"?>
        <urlset
           xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
           http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
            <url>
                <loc>http://www.trovabiomassa.com/</loc>
            </url>
            <url>
                <loc>http://www.trovabiomassa.com/privacy/</loc>
            </url>
            <url>
                <loc>http://www.trovabiomassa.com/about/</loc>
            </url>
            <url>
                <loc>http://www.trovabiomassa.com/energia/</loc>
            </url>'''
    ]

    #to be optimized
    region_provinces, products = makeDicts()

    for k, v in products.items():
        html.append('<url><loc>http://www.trovabiomassa.com/cercabiomassa/' +
                    v + '/</loc></url>')
        html.append('<url><loc>http://www.trovabiomassa.com/trovabiomassa/' +
                    v + '/</loc></url>')
        regions = []
        for key, val in region_provinces.items():
            provinces = []
            if val[0] not in regions:
                regions.append(val[0])
                html.append(
                    '<url><loc>http://www.trovabiomassa.com/cercabiomassa/' +
                    v + '/' + val[0] + '/</loc></url>')
                # val[0] is the region name; the remaining entries are
                # province groups.
                for idx, el in enumerate(val):
                    if idx != 0:
                        if el not in provinces:
                            provinces.append(el)
                            for e in el:
                                html.append(
                                    '<url><loc>http://www.trovabiomassa.com/cercabiomassa/'
                                    + v + '/' + val[0] + '/' + e[1] +
                                    '/</loc></url>')

        # NOTE(review): this loop runs once per product, so every reseller
        # URL is emitted len(products) times — looks unintentional; confirm
        # before hoisting it out of the product loop.
        for f in Fuel.objects.all():
            parts = [
                f.product.slug, f.reseller.region, f.reseller.province,
                f.reseller.name,
                str(f.reseller.id)
            ]
            parts = [quote_plus(p.lower().encode('utf8')) for p in parts]
            html.append('<url><loc>http://www.trovabiomassa.com/' + parts[0] +
                        '/' + parts[1] + '/' + parts[2] + '/trova/' + parts[3] +
                        '/' + parts[4] + '/' + parts[0] + '/</loc></url>')

    html.append('</urlset>')

    return StreamingHttpResponse(html, content_type="text/xml")
Beispiel #51
0
    def do_request(self, request, url, method, request_data):
        """Proxy an HTTP request to *url* and translate the answer back
        into a streaming Django response.

        *request_data* is a mutable dict shared with the request/response
        proxy processors; this method fills in method/url/original-request
        and sensible defaults for data, headers, cookies and user.
        Network failures map to 502/504 error responses.
        """
        url = iri_to_uri(url)

        request_data.update({
            "method": method,
            "url": url,
            "original-request": request,
        })

        # Callers may pre-populate these; only fill in what is missing.
        request_data.setdefault("data", None)
        request_data.setdefault("headers", {})
        request_data.setdefault("cookies", SimpleCookie())
        request_data.setdefault("user", request.user)

        # Request creation
        proto, host, cgi, param, query = urlparse(url)[:5]

        # Build the Via header
        protocolVersion = self.protocolRE.match(request.META['SERVER_PROTOCOL'])
        if protocolVersion is not None:
            protocolVersion = protocolVersion.group(1)
        else:
            protocolVersion = '1.1'

        via_header = "%s %s (Wirecloud-python-Proxy/1.1)" % (protocolVersion, get_current_domain(request))
        if 'via' in request_data['headers']:
            request_data['headers']['via'] += ', ' + via_header
        else:
            request_data['headers']['via'] = via_header

        # XFF headers
        if 'x-forwarded-for' in request_data['headers']:
            request_data['headers']['x-forwarded-for'] += ', ' + request.META['REMOTE_ADDR']
        else:
            request_data['headers']['x-forwarded-for'] = request.META['REMOTE_ADDR']

        # Pass proxy processors to the new request
        try:
            for processor in get_request_proxy_processors():
                processor.process_request(request_data)
        except ValidationError as e:
            return e.get_response(request)

        # Cookies: serialise whatever the processors left in request_data.
        cookie_header_content = ', '.join([request_data['cookies'][key].OutputString() for key in request_data['cookies']])
        if cookie_header_content != '':
            request_data['headers']['Cookie'] = cookie_header_content

        # Seems that Django or WSGI provides default values for the
        # Content-Length and Content-Type headers, so we are not able to detect
        # if the request provided them :(
        if str(request_data['headers'].get('content-length', '0')).strip() == '0':
            request_data['data'] = None
            if 'content-type' in request_data['headers']:
                del request_data['headers']['content-type']

        # Open the request
        try:
            res = requests.request(request_data['method'], request_data['url'], headers=request_data['headers'], data=request_data['data'], stream=True, verify=getattr(settings, 'WIRECLOUD_HTTPS_VERIFY', True))
        except requests.exceptions.Timeout as e:
            return build_error_response(request, 504, _('Gateway Timeout'), details=six.text_type(e))
        except requests.exceptions.SSLError as e:
            return build_error_response(request, 502, _('SSL Error'), details=six.text_type(e))
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.TooManyRedirects) as e:
            return build_error_response(request, 504, _('Connection Error'), details=six.text_type(e))

        # Build a Django response: stream raw (still-encoded) 4 KiB chunks so
        # the upstream Content-Encoding header stays accurate.
        response = StreamingHttpResponse(res.raw.stream(4096, decode_content=False), status=res.status_code, reason=res.reason)

        # Add all the headers received from the response
        for header in res.headers:

            header_lower = header.lower()
            if header_lower == 'set-cookie':

                # Re-issue cookies through Django instead of copying the
                # raw Set-Cookie header.
                for cookie in res.cookies:
                    response.set_cookie(cookie.name, value=cookie.value, expires=cookie.expires, path=cookie.path)

            elif header_lower == 'via':

                # Append the upstream Via chain to ours; written once below.
                via_header = via_header + ', ' + res.headers[header]

            elif is_valid_response_header(header_lower):
                response[header] = res.headers[header]

        # Pass proxy processors to the response
        for processor in get_response_proxy_processors():
            response = processor.process_response(request_data, response)

        response['Via'] = via_header

        return response
Beispiel #52
0
def deploy_nginx_tmp_file(request):
    """Return the generated temporary nginx config for inspection.

    Fixes over the previous version: the file handle is closed
    deterministically via ``with`` (it used to leak), and the content is
    wrapped in a list — streaming a bare str makes Django emit one chunk
    per character.
    """
    with open('/tmp/tmp_nginx.conf', 'r') as conf_file:
        content = conf_file.read()
    response = StreamingHttpResponse([content])
    response['Content-Type'] = 'text/plain; charset=utf8'
    return response
Beispiel #53
0
    def do_request(self, request, url, method):
        """Proxy an HTTP request to *url* and translate the answer back
        into a streaming Django response (Python 2 variant).

        Extracts headers/cookies/body from ``request.META``, runs the
        request proxy processors, performs the upstream request with
        ``requests`` and mirrors status, headers and cookies back.
        Network failures map to 502/504.
        """
        url = iri_to_uri(url)

        request_data = {
            "method": method,
            "url": url,
            "data": None,
            "headers": {},
            "cookies": Cookie.SimpleCookie(),
            "user": request.user,
            "original-request": request,
        }

        # Request creation
        proto, host, cgi, param, query = urlparse.urlparse(url)[:5]

        # Extract headers from META
        if 'HTTP_TRANSFER_ENCODING' in request.META:
            return build_error_response(request, 500, "Wirecloud doesn't support requests using Transfer-Encodings")

        for header in request.META.items():
            header_name = header[0].lower()
            if header_name == 'content_type' and header[1]:
                request_data['headers']["content-type"] = header[1]

            elif header_name == 'content_length' and header[1]:
                # Only take into account request body if the request has a
                # Content-Length header (we don't support chunked requests)
                request_data['data'] = request

                # It's better not propagate the Content-Length header as
                # request processors may change final data length. In addition
                # to this, the requests modules ignores the Content-Length
                # header and tries to obtain data length directly from the
                # data parameter. Therefore, providing this value in the len
                # attribute seems to be the best option
                request_data['data'].len = int(header[1])

            elif header_name == 'cookie' or header_name == 'http_cookie':

                cookie_parser = Cookie.SimpleCookie(str(header[1]))

                # Never forward Wirecloud's own session/CSRF cookies upstream.
                # BUG FIX: both deletions are now guarded; the session-cookie
                # one used to raise KeyError when that cookie was absent.
                if settings.SESSION_COOKIE_NAME in cookie_parser:
                    del cookie_parser[settings.SESSION_COOKIE_NAME]

                if settings.CSRF_COOKIE_NAME in cookie_parser:
                    del cookie_parser[settings.CSRF_COOKIE_NAME]

                request_data['cookies'].update(cookie_parser)

            elif self.http_headerRE.match(header_name) and not header_name in self.blacklisted_http_headers:

                fixed_name = header_name.replace("http_", "", 1).replace('_', '-')
                request_data['headers'][fixed_name] = header[1]

        # Build the Via header
        protocolVersion = self.protocolRE.match(request.META['SERVER_PROTOCOL'])
        if protocolVersion is not None:
            protocolVersion = protocolVersion.group(1)
        else:
            protocolVersion = '1.1'

        via_header = "%s %s (Wirecloud-python-Proxy/1.1)" % (protocolVersion, get_current_domain(request))
        if 'via' in request_data['headers']:
            request_data['headers']['via'] += ', ' + via_header
        else:
            request_data['headers']['via'] = via_header

        # XFF headers
        if 'x-forwarded-for' in request_data['headers']:
            request_data['headers']['x-forwarded-for'] += ', ' + request.META['REMOTE_ADDR']
        else:
            request_data['headers']['x-forwarded-for'] = request.META['REMOTE_ADDR']

        request_data['headers']['x-forwarded-host'] = host
        if 'x-forwarded-server' in request_data['headers']:
            del request_data['headers']['x-forwarded-server']

        # Pass proxy processors to the new request
        try:
            for processor in get_request_proxy_processors():
                processor.process_request(request_data)
        except ValidationError as e:
            return e.get_response()

        # Cookies
        # BUG FIX: serialise from request_data['cookies'], not from the
        # `cookie_parser` local — that variable is unbound when the request
        # carried no Cookie header (NameError) and misses cookies added by
        # the proxy processors (KeyError).
        cookie_header_content = ', '.join([request_data['cookies'][key].OutputString() for key in request_data['cookies']])
        if cookie_header_content != '':
            request_data['headers']['Cookie'] = cookie_header_content

        # Open the request
        try:
            res = requests.request(request_data['method'], request_data['url'], headers=request_data['headers'], data=request_data['data'], stream=True)
        except requests.exceptions.HTTPError:
            return HttpResponse(status=504)
        except requests.exceptions.ConnectionError:
            return HttpResponse(status=502)

        # Build a Django response: stream raw (still-encoded) 4 KiB chunks so
        # the upstream Content-Encoding header stays accurate.
        response = StreamingHttpResponse(res.raw.stream(4096, decode_content=False))

        # Set status code to the response
        response.status_code = res.status_code

        # Add all the headers received from the response
        for header in res.headers:

            header_lower = header.lower()
            if header_lower == 'set-cookie':

                # Re-issue cookies through Django instead of copying the
                # raw Set-Cookie header.
                for cookie in res.cookies:
                    response.set_cookie(cookie.name, value=cookie.value, expires=cookie.expires, path=cookie.path)

            elif header_lower == 'via':

                # Append the upstream Via chain to ours; written once below.
                via_header = via_header + ', ' + res.headers[header]

            elif is_valid_response_header(header_lower):
                response[header] = res.headers[header]

        # Pass proxy processors to the response
        for processor in get_response_proxy_processors():
            response = processor.process_response(request_data, response)

        response['Via'] = via_header

        return response
Beispiel #54
0
 def test_content_length_header_not_added_for_streaming_response(self):
     """CommonMiddleware must not compute Content-Length for streams."""
     streaming = StreamingHttpResponse('content')
     self.assertNotIn('Content-Length', streaming)
     processed = CommonMiddleware().process_response(HttpRequest(), streaming)
     self.assertNotIn('Content-Length', processed)
Beispiel #55
0
    def test_streaming_response(self):
        """Exercise the StreamingHttpResponse contract (py2 variant):
        byte chunks, single-pass iteration, no ``content`` attribute,
        replaceable ``streaming_content``, header-only bytes coercion,
        and no write()/tell() support.  Assertion order matters — the
        response is consumed as it is iterated."""
        r = StreamingHttpResponse(iter(['hello', 'world']))

        # iterating over the response itself yields bytestring chunks.
        chunks = list(r)
        self.assertEqual(chunks, [b'hello', b'world'])
        for chunk in chunks:
            self.assertIsInstance(chunk, six.binary_type)

        # and the response can only be iterated once.
        self.assertEqual(list(r), [])

        # even when a sequence that can be iterated many times, like a list,
        # is given as content.
        r = StreamingHttpResponse(['abc', 'def'])
        self.assertEqual(list(r), [b'abc', b'def'])
        self.assertEqual(list(r), [])

        # streaming responses don't have a `content` attribute.
        self.assertFalse(hasattr(r, 'content'))

        # and you can't accidentally assign to a `content` attribute.
        with self.assertRaises(AttributeError):
            r.content = 'xyz'

        # but they do have a `streaming_content` attribute.
        self.assertTrue(hasattr(r, 'streaming_content'))

        # that exists so we can check if a response is streaming, and wrap or
        # replace the content iterator.
        r.streaming_content = iter(['abc', 'def'])
        r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
        self.assertEqual(list(r), [b'ABC', b'DEF'])

        # coercing a streaming response to bytes doesn't return a complete HTTP
        # message like a regular response does. it only gives us the headers.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        self.assertEqual(six.binary_type(r),
                         b'Content-Type: text/html; charset=utf-8')

        # and this won't consume its content.
        self.assertEqual(list(r), [b'hello', b'world'])

        # additional content cannot be written to the response.
        r = StreamingHttpResponse(iter(['hello', 'world']))
        with self.assertRaises(Exception):
            r.write('!')

        # and we can't tell the current position.
        with self.assertRaises(Exception):
            r.tell()
Beispiel #56
0
    # Fragment of a backup-download view (its `def` line is missing from this
    # dump).  Resolves the requested backup file under PROJECT_ROOT/backup/,
    # streams it as an attachment, and records the download timestamp.
    # NOTE(review): `type` and `filename` are presumably view parameters, and
    # `type` shadows the builtin — confirm against the missing signature.
    d = os.path.join(settings.PROJECT_ROOT, 'backup', type)
    filename = os.path.join(d, filename)
    if not os.path.isfile(filename):
        return render(request, 'error.html', {'error_msg': '没有找到相关文件'})

    def file_iterator(filename, chunk_size=512):
        # Lazily yield the file in fixed-size chunks so large backups are
        # never loaded into memory at once.
        with open(filename, 'rb') as f:
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c
                else:
                    break

    response = StreamingHttpResponse(file_iterator(filename))
    response['Content-Type'] = 'application/octet-stream'
    response[
        'Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(
            filename)

    # Remember when the backup was last fetched (stored in Redis).
    redis.set(settings.LAST_BACKUP_KEY, time.time())

    return response


class PaperdbPaperManage(View):
    def get(self, request):
        """Render the paper-management admin page."""
        context = {}
        return render(request,
                      'myadmin/modulemanage/paperdb/paper/view.html', context)