Example #1
File: asgi.py  Project: lauralorenz/django
    async def read_body(self, receive):
        """
        Reads an HTTP body from an ASGI connection.
        """
        body = b""
        while True:
            message = await receive()
            if message["type"] == "http.disconnect":
                # Bye bye.
                raise RequestAborted()
            else:
                # See if the message has a body, and if it's the end, launch into
                # handling (and a synchronous subthread)
                if "body" in message:
                    body += message["body"]
                if not message.get("more_body", False):
                    break
        # Limit the maximum request data size that will be handled in-memory.
        # TODO: Stream the body to temp disk instead?
        # (we can't provide a file-like with direct reading as we would not be async)
        if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                self._content_length > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
            raise RequestDataTooBig(
                "Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.")
        return body
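To see the ASGI message shape this coroutine consumes, here is a minimal, self-contained sketch (not from the project): a fake receive() that replays body chunks as http.request messages, driven by a stripped-down copy of the accumulation loop above.

import asyncio

def make_receive(chunks):
    """Build a fake ASGI receive() that replays the given body chunks."""
    messages = [
        {"type": "http.request", "body": chunk, "more_body": i < len(chunks) - 1}
        for i, chunk in enumerate(chunks)
    ]
    iterator = iter(messages)

    async def receive():
        return next(iterator)

    return receive

async def read_all(receive):
    """Same accumulation loop as read_body above, minus the size check."""
    body = b""
    while True:
        message = await receive()
        if message["type"] == "http.disconnect":
            raise RuntimeError("client disconnected")
        body += message.get("body", b"")
        if not message.get("more_body", False):
            return body

print(asyncio.run(read_all(make_receive([b"hello ", b"world"]))))  # b'hello world'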
Example #2
def result(request):
    # t1 = time.time()
    query = request.GET.get('searchword')
    # TODO: write a validator? [a-zA-Zㄱ-힣0-9-_!@#$%&*()=+.,/?'";:[]{}~₩]  // disallow '&#'

    if not query:
        raise Http404()

    if len(query.encode("utf8")) > 100:  # ring adds a prefix, so cap the key at a safe 100 bytes
        raise RequestDataTooBig(
            'Memcached key length must be less than 100 bytes')

    key = query.strip().replace(" ", ".")
    total_resultset = search_books(key)

    page = request.GET.get('page')
    paginator = Paginator(total_resultset, 20)
    try:
        total_resultset_paged = paginator.page(page)
    except PageNotAnInteger:
        total_resultset_paged = paginator.page(1)
    except EmptyPage:
        total_resultset_paged = paginator.page(paginator.num_pages)

    context = {
        'total_resultset_paged': total_resultset_paged,
        'yes24_url': yes24_base_url + quote(query, encoding="euc-kr"),
        'aladin_url': aladin_base_url + quote(query, encoding="euc-kr"),
    }
    # print(time.time() - t1, 'seconds, total elapsed time')

    return render(request, 'moa/search_result.html', context)
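The TODO above asks for a query validator; a minimal sketch of one possible allow-list check (hypothetical, not part of this project) could look like this:

import re

# Allow letters, digits, Hangul, whitespace and the punctuation listed in the
# TODO; additionally reject the '&#' sequence outright. Purely illustrative.
_ALLOWED_QUERY = re.compile(r"^[a-zA-Z0-9ㄱ-힣\s\-_!@#$%&*()=+.,/?'\";:\[\]{}~₩]+$")

def is_valid_query(query: str) -> bool:
    return bool(_ALLOWED_QUERY.match(query)) and "&#" not in query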
Example #3
    def create(self, validated_data):
        """
        Create report instance and submit it to civicrm server, given the validated data.
        """
        instance = Report()
        try:  # load the encryption key
            report_decryptor = ReportDecryptor(
                server_settings.RECEIVER_SECRET_KEY_FILENAME,
                receiver_key_id=validated_data.get('encryption_key_id'))
        except RuntimeError as e:
            raise RuntimeError("unable to retrieve the report encryption key") from e

        # The correct way of doing this would be to invoke the report's
        # serializer here for validation.
        try:
            decrypted_report = json.loads(
                report_decryptor.decrypt_report(
                    validated_data.get('encrypted_blob')))
        except Exception as e:
            raise RuntimeError("submitted encrypted blob is badly formatted") from e
        if 'client_id' not in decrypted_report or 'report_body' not in decrypted_report:
            raise RuntimeError("both client id and report body are required")

        instance.client_id = decrypted_report['client_id']
        #first we check if the request is coming from a valid client
        self._validate_client_id(instance.client_id)

        #now that we have a valid client_id we retrieve client_session
        #make sure the client has not passed their submission quota
        client_record = self._get_client_submission_record(instance.client_id)
        today_key = datetime.datetime.today().strftime('%Y-%m-%d')
        self._verify_client_no_of_submission_quota_usage(
            client_record, today_key)

        try:
            instance.submission_time = validated_data.get('submission_time')
            instance.report_id = decrypted_report['report_id']
            instance.report_body = decrypted_report['report_body']
            instance.reporter_name = decrypted_report['name']
            instance.reporter_email = decrypted_report['email']
            instance.reporter_telegram = decrypted_report['telegram']
        except KeyError as e:
            raise RuntimeError("Error in the set of submitted fields") from e
        # Reject if the report is too large.
        # All other fields are size-limited in CiviCRM, so the report_body is the only concern.
        if (len(decrypted_report['report_body']) >
                server_settings.MAX_SIZE_OF_SUBMISSION):
            raise RequestDataTooBig("too big of a report")

        instance.report_body = decrypted_report['report_body']
        instance.save()

        #counting client's daily submission
        client_record[today_key + "_no"] += 1
        client_record.save()

        return instance
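The quota bookkeeping above assumes a per-client record with day-keyed counters such as '<YYYY-MM-DD>_no'. A hypothetical sketch of the verification helper it calls (the record's dict-like interface and the daily limit are assumptions, not from the source):

MAX_SUBMISSIONS_PER_DAY = 100  # assumed limit, not from the source

def _verify_client_no_of_submission_quota_usage(client_record, today_key):
    """Reject the submission once today's counter has reached the quota."""
    counter_key = today_key + "_no"
    if counter_key not in client_record:
        client_record[counter_key] = 0  # first submission of the day
    if client_record[counter_key] >= MAX_SUBMISSIONS_PER_DAY:
        raise RuntimeError("daily submission quota exceeded")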
Example #4
def check_content_length(parser_context):

    if parser_context and settings.DATA_UPLOAD_MAX_MEMORY_SIZE and 'request' in parser_context:

        try:
            content_length = int(parser_context['request'].META.get(
                'CONTENT_LENGTH', 0))
        except (ValueError, TypeError):
            content_length = 0

        if (content_length and content_length > settings.DATA_UPLOAD_MAX_MEMORY_SIZE) or content_length < 0:
            raise RequestDataTooBig(
                'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
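A hypothetical way to wire this helper into Django REST Framework: subclass a parser and run the check before the stream is read (the class name is an assumption; JSONParser and its parse() signature are DRF's real API).

from rest_framework.parsers import JSONParser

class SizeLimitedJSONParser(JSONParser):
    """JSON parser that rejects oversized bodies before parsing them."""

    def parse(self, stream, media_type=None, parser_context=None):
        check_content_length(parser_context)
        return super().parse(stream, media_type, parser_context)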
Example #5
    def handle_raw_input(self,
                         input_data,
                         META,
                         content_length,
                         boundary,
                         encoding=None):
        """
        Handle the raw input from the client.
        """
        max_size = getattr(settings, 'CUSTOM_UPLOAD_MAX_FILE_SIZE',
                           10 * 2**30)  # default 10 GiB
        if max_size is None:
            return
        if content_length > max_size:
            raise RequestDataTooBig(gettext('上传文件超过大小限制'))  # "Uploaded file exceeds the size limit"
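Such a custom upload handler only takes effect if it is registered; a sketch of the relevant settings (the handler's dotted path is an assumption, the two django.core handlers are the framework defaults):

# settings.py (sketch)
CUSTOM_UPLOAD_MAX_FILE_SIZE = 10 * 2**30  # 10 GiB

FILE_UPLOAD_HANDLERS = [
    "myapp.uploadhandlers.MaxSizeUploadHandler",  # hypothetical path to the handler above
    "django.core.files.uploadhandler.MemoryFileUploadHandler",
    "django.core.files.uploadhandler.TemporaryFileUploadHandler",
]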
Example #6
    def body(self):
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")

            # Limit the maximum request data size that will be handled in-memory.
            if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                    int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')

            try:
                self._body = self.read()
            except IOError as e:
                raise UnreadablePostError(*e.args) from e
            self._stream = BytesIO(self._body)
        return self._body
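Reading request.body through this property can raise RequestDataTooBig (a SuspiciousOperation subclass that Django otherwise turns into a 400 response). A hypothetical view that catches it and answers 413 instead:

from django.core.exceptions import RequestDataTooBig
from django.http import JsonResponse

def upload_view(request):  # illustrative view, not from the source
    try:
        payload = request.body
    except RequestDataTooBig:
        return JsonResponse({"error": "request body too large"}, status=413)
    return JsonResponse({"received_bytes": len(payload)})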
Example #7
    def handle_raw_input(self,
                         input_data,
                         META,
                         content_length,
                         boundary,
                         encoding=None):
        """
        Handle the raw input from the client.
        """
        max_size = self.get_max_size_upload_limit()
        if max_size is None:
            return
        if content_length > max_size:
            raise RequestDataTooBig(gettext('上传文件超过大小限制'))  # "Uploaded file exceeds the size limit"
        if content_length <= 0:
            raise Exception(gettext('无效的标头Content-Length'))  # "Invalid Content-Length header"
Example #8
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.
        """
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                            and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                            num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                            and num_bytes_read >
                            settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig(
                            'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding,
                                              errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_str(file_name,
                                              encoding,
                                              errors='replace')
                        file_name = self.IE_sanitize(html.unescape(file_name))
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name,
                                    file_name,
                                    content_type,
                                    content_length,
                                    charset,
                                    content_type_extra,
                                )
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 -
                                                                   remaining)
                                    stripped_chunk += b"".join(
                                        over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        self._post._mutable = False
        return self._post, self._files
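For context, a parse() like this is normally driven through Django's own multipart entry point; a minimal sketch (the wrapper function is hypothetical, but MultiPartParser and the attributes mirror what HttpRequest.parse_file_upload does internally):

from django.http.multipartparser import MultiPartParser

def parse_multipart(request):
    """Parse a multipart/form-data request body into (POST, FILES)."""
    parser = MultiPartParser(request.META, request, request.upload_handlers,
                             request.encoding)
    return parser.parse()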
Example #9
    def _handle_raw_input_without_file_stream(self,
                                              input_data,
                                              META,
                                              raw_content_length,
                                              boundary,
                                              encoding=None):
        """
        Replaces django.http.multipartparser.MultiPartParser.parse
        A rfc2388 multipart/form-data parser but replacing the file stream to the creation of empty files.
        Returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
        """
        # Create the data structures to be used later.
        _post = QueryDict(mutable=True)
        _files = MultiValueDict()

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        _chunk_size = min([2**31 - 4, self.chunk_size])

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(input_data, _chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        for item_type, meta_data, field_stream in Parser(stream, boundary):
            if old_field_name:
                # We run this at the beginning of the next loop
                # since we cannot be sure a file is complete until
                # we hit the next boundary/part of the multipart content.
                file_obj = self.file_complete(raw_content_length)
                if file_obj:
                    # If it returns a file object, then set the files dict.
                    _files.appendlist(
                        force_str(old_field_name, encoding, errors="replace"),
                        file_obj)
                old_field_name = None

            try:
                disposition = meta_data["content-disposition"][1]
                field_name = disposition["name"].strip()
            except (KeyError, IndexError, AttributeError):
                continue

            transfer_encoding = meta_data.get("content-transfer-encoding")
            if transfer_encoding is not None:
                transfer_encoding = transfer_encoding[0].strip()
            field_name = force_str(field_name, encoding, errors="replace")

            if item_type == FIELD:
                # NOTE: Parse fields as usual, same as ``MultiPartParser.parse``
                # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                num_post_keys += 1
                if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                        and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                        num_post_keys):
                    raise TooManyFieldsSent(
                        "The number of GET/POST parameters exceeded "
                        "settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.")

                # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                    read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                # This is a post field, we can just set it in the post
                if transfer_encoding == "base64":
                    raw_data = field_stream.read(size=read_size)
                    num_bytes_read += len(raw_data)
                    try:
                        data = base64.b64decode(raw_data)
                    except binascii.Error:
                        data = raw_data
                else:
                    data = field_stream.read(size=read_size)
                    num_bytes_read += len(data)

                # Add two here to make the check consistent with the
                # x-www-form-urlencoded check that includes '&='.
                num_bytes_read += len(field_name) + 2
                if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                        num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                    raise RequestDataTooBig(
                        "Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
                    )

                _post.appendlist(field_name,
                                 force_str(data, encoding, errors="replace"))
            elif item_type == FILE:
                # NOTE: Parse files WITHOUT a stream.
                # This is a file, use the handler...
                file_name = disposition.get("filename")
                if file_name:
                    file_name = force_str(file_name,
                                          encoding,
                                          errors="replace")
                    file_name = self.sanitize_file_name(file_name)
                if not file_name:
                    continue

                content_type, content_type_extra = meta_data.get(
                    "content-type", ("", {}))
                content_type = content_type.strip()
                charset = content_type_extra.get("charset")
                content_length = None

                self.new_file(field_name, file_name, content_type,
                              content_length, charset, content_type_extra)

                # Handle file upload completions on next iteration.
                old_field_name = field_name
            else:
                # If this is neither a FIELD nor a FILE, just exhaust the stream.
                exhaust(stream)

        # Make sure that the request data is all fed
        exhaust(input_data)

        _post._mutable = False
        return _post, _files
Example #10
File: http.py  Project: vaclavrak/channels
    def __init__(self, scope, body):
        self.scope = scope
        self._content_length = 0
        self._post_parse_error = False
        self._read_started = False
        self.resolver_match = None
        self.script_name = self.scope.get("root_path", "")
        if self.script_name and scope["path"].startswith(self.script_name):
            # TODO: Better is-prefix checking, slash handling?
            self.path_info = scope["path"][len(self.script_name) :]
        else:
            self.path_info = scope["path"]

        # The Django path differs from the ASGI scope path: it should include the script name.
        if self.script_name:
            self.path = "%s/%s" % (
                self.script_name.rstrip("/"),
                self.path_info.replace("/", "", 1),
            )
        else:
            self.path = scope["path"]

        # HTTP basics
        self.method = self.scope["method"].upper()
        # fix https://github.com/django/channels/issues/622
        query_string = self.scope.get("query_string", "")
        if isinstance(query_string, bytes):
            query_string = query_string.decode("utf-8")
        self.META = {
            "REQUEST_METHOD": self.method,
            "QUERY_STRING": query_string,
            "SCRIPT_NAME": self.script_name,
            "PATH_INFO": self.path_info,
            # Old code will need these for a while
            "wsgi.multithread": True,
            "wsgi.multiprocess": True,
        }
        if self.scope.get("client", None):
            self.META["REMOTE_ADDR"] = self.scope["client"][0]
            self.META["REMOTE_HOST"] = self.META["REMOTE_ADDR"]
            self.META["REMOTE_PORT"] = self.scope["client"][1]
        if self.scope.get("server", None):
            self.META["SERVER_NAME"] = self.scope["server"][0]
            self.META["SERVER_PORT"] = str(self.scope["server"][1])
        else:
            self.META["SERVER_NAME"] = "unknown"
            self.META["SERVER_PORT"] = "0"
        # Handle old-style headers for a transition period
        if "headers" in self.scope and isinstance(self.scope["headers"], dict):
            self.scope["headers"] = [
                (x.encode("latin1"), y) for x, y in self.scope["headers"].items()
            ]
        # Headers go into META
        for name, value in self.scope.get("headers", []):
            name = name.decode("latin1")
            if name == "content-length":
                corrected_name = "CONTENT_LENGTH"
            elif name == "content-type":
                corrected_name = "CONTENT_TYPE"
            else:
                corrected_name = "HTTP_%s" % name.upper().replace("-", "_")
            # HTTPbis says only ASCII chars are allowed in headers, but we decode latin1 just in case
            value = value.decode("latin1")
            if corrected_name in self.META:
                value = self.META[corrected_name] + "," + value
            self.META[corrected_name] = value
        # Pull out request encoding if we find it
        if "CONTENT_TYPE" in self.META:
            self.content_type, self.content_params = cgi.parse_header(
                self.META["CONTENT_TYPE"]
            )
            if "charset" in self.content_params:
                try:
                    codecs.lookup(self.content_params["charset"])
                except LookupError:
                    pass
                else:
                    self.encoding = self.content_params["charset"]
        else:
            self.content_type, self.content_params = "", {}
        # Pull out content length info
        if self.META.get("CONTENT_LENGTH", None):
            try:
                self._content_length = int(self.META["CONTENT_LENGTH"])
            except (ValueError, TypeError):
                pass
        # Body handling
        # TODO: chunked bodies

        # Limit the maximum request data size that will be handled in-memory.
        if (
            settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
            and self._content_length > settings.DATA_UPLOAD_MAX_MEMORY_SIZE
        ):
            raise RequestDataTooBig(
                "Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
            )

        self._body = body
        assert isinstance(self._body, bytes), "Body is not bytes"
        # Add a stream-a-like for the body
        self._stream = BytesIO(self._body)
        # Other bits
        self.resolver_match = None
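A minimal ASGI HTTP scope that would exercise this constructor (a sketch; the concrete values and the AsgiRequest name are assumptions about how the class above is used):

scope = {
    "type": "http",
    "method": "post",  # upper-cased by the constructor
    "path": "/submit/",
    "root_path": "",
    "query_string": b"q=1",
    "headers": [
        (b"content-type", b"application/json"),
        (b"content-length", b"2"),
    ],
    "client": ("127.0.0.1", 54321),
    "server": ("testserver", 80),
}
# request = AsgiRequest(scope, b"{}")  # hypothetical name for the class above
# request.META["CONTENT_LENGTH"] would then be "2" and request.method "POST"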
Example #11
    def create(self, validated_data):
        """
        Create and return a new `Snippet` instance, given the validated data.
        """
        instance = ReportAttachment()
        try:
            attachment_decryptor = ReportDecryptor(
                server_settings.RECEIVER_SECRET_KEY_FILENAME,
                receiver_key_id=validated_data.get('encryption_key_id'))
        except RuntimeError as e:
            raise RuntimeError("unable to retrieve the report encryption key") from e

        try:
            decrypted_attachment = json.loads(
                attachment_decryptor.decrypt_report(
                    validated_data.get('encrypted_blob')))
        except Exception as e:
            raise RuntimeError("submitted encrypted blob is badly formatted") from e

        if 'client_id' not in decrypted_attachment:
            raise RuntimeError("client id is required")

        #first we check if the request is coming from a valid client
        instance.client_id = decrypted_attachment['client_id']
        self._validate_client_id(instance.client_id)

        try:
            instance.attachment_id = decrypted_attachment['attachment_id']
            instance.report_id = decrypted_attachment['report_id']
            instance.submission_time = validated_data.get('submission_time')
            instance.attachment_type = decrypted_attachment['attachment_type']
        except KeyError as e:
            raise RuntimeError("Error in the set of submitted fields") from e

        if validated_data.get('attachment_data'):
            instance.attachment_data = self._decrypt_attachement(
                decrypted_attachment['encryption_key'],
                decrypted_attachment['encryption_iv'],
                validated_data.get('attachment_data').read())
        elif validated_data.get('s3_submission') is True:
            data_retrieval_response = self._retrieve_attachment_from_s3(
                instance.attachment_id)
            if data_retrieval_response.status_code != 200:
                raise RuntimeError(
                    'Failed to retrieve attachment data from S3 due to Error %s'
                    % data_retrieval_response.status_code)

            instance.attachment_data = self._decrypt_attachement(
                decrypted_attachment['encryption_key'],
                decrypted_attachment['encryption_iv'],
                data_retrieval_response.content)
        else:
            raise RuntimeError(
                "submission includes attachment metadata without a body")

        current_submission_size = len(instance.attachment_data)
        if current_submission_size > server_settings.MAX_SIZE_OF_SUBMISSION:
            raise RequestDataTooBig("attachment is too big")

        #now that we have a valid client_id we retrieve client_session
        #make sure the client has not passed their size of submission quota
        #otherwise raise an exception
        client_record = self._get_client_submission_record(instance.client_id)
        today_key = datetime.datetime.today().strftime('%Y-%m-%d')
        self._verify_client_size_of_submission_quota_usage(
            client_record, today_key, current_submission_size)

        instance.save()

        client_record[today_key + "_size"] += current_submission_size
        client_record.save()

        return instance