def parse_headers_and_body_with_django(headers, body):
    """Parse `headers` and `body` with Django's :class:`MultiPartParser`.

    `MultiPartParser` is a curiously ugly and RFC non-compliant concoction.

    Amongst other things, it coerces all field names, field data, and
    filenames into Unicode strings using the "replace" error strategy, so
    be warned that your data may be silently mangled.

    It also, in 1.3.1 at least, does not recognise any transfer encodings
    at *all* because its header parsing code was broken.

    I'm also fairly sure that it'll fall over on headers than span more
    than one line.

    In short, it's a piece of code that inspires little confidence, yet we
    must work with it, hence we need to round-trip test multipart handling
    with it.
    """
    # NOTE(review): MemoryFileUploadHandler normally only accepts files when
    # its `activated` attribute has been set by the pre-parse size check --
    # confirm that uploads are not silently dropped here.
    handler = MemoryFileUploadHandler()
    # MultiPartParser reads the content type / length from HTTP_-prefixed
    # keys of a META-style dict.
    meta = {
        "HTTP_CONTENT_TYPE": headers["Content-Type"],
        "HTTP_CONTENT_LENGTH": headers["Content-Length"],
    }
    parser = MultiPartParser(
        META=meta, input_data=BytesIO(body), upload_handlers=[handler])
    # Returns the (QueryDict, MultiValueDict) pair from Django's parser.
    return parser.parse()
def parse_file_upload(self, META, post_data):
    """Parse a multipart body.

    Freezes the request's upload handlers (altering them mid-upload is an
    error), then delegates to ``MultiPartParser`` and returns its
    ``(POST QueryDict, FILES MultiValueDict)`` tuple.
    """
    frozen_handlers = ImmutableList(
        self.upload_handlers,
        warning="You cannot alter upload handlers after the upload has been processed."
    )
    self.upload_handlers = frozen_handlers
    return MultiPartParser(META, post_data, frozen_handlers, self.encoding).parse()
def wrapper(request, *args, **kwargs):
    # Only DELETE requests need the extra parsing step; everything else
    # passes straight through to the wrapped view.
    if request.method == 'DELETE':
        # Parse the body with no upload handlers: we only want a couple of
        # simple parameters, and files are never processed even if sent.
        params, _unused_files = MultiPartParser(
            request.META, request, [], request.encoding
        ).parse()
        request.DELETE = params
    return function(request, *args, **kwargs)
def parse(self, stream):
    """Parse *stream* as multipart form data.

    Returns a 2-tuple ``(data, files)``: ``data`` is a :class:`QueryDict`
    of form parameters, ``files`` a :class:`QueryDict` of form files.
    """
    http_request = self.view.request
    handlers = http_request._get_upload_handlers()
    return DjangoMultiPartParser(http_request.META, stream, handlers).parse()
def parse_request_data(self):
    """Return ``(data, files)`` parsed from the current request.

    POST requests reuse Django's own parsing; other methods are parsed
    manually from the raw body.
    """
    if self.request.method == 'POST':
        return self.request.POST, self.request.FILES
    # NOTE(review): ``raw_post_data`` was renamed ``body`` in Django 1.4;
    # this code targets an older Django -- confirm before upgrading.
    data = self.request.raw_post_data
    if self.request.META.get('CONTENT_TYPE', '').startswith('multipart'):
        data = StringIO(data)
        parser = MultiPartParser(self.request.META, data, self.upload_handlers)
        # parse() already yields a (QueryDict, MultiValueDict) pair.
        query = parser.parse()
        return query
    # Fallback: treat the body as URL-encoded form data with no files.
    return QueryDict(data), {}
def parse(self, stream):
    """
    Returns a 2-tuple of `(data, files)`.

    `data` will be a :class:`QueryDict` containing all the form parameters.
    `files` will be a :class:`QueryDict` containing all the form files.

    Raises :class:`ErrorResponse` (HTTP 400) when the multipart body
    cannot be parsed.
    """
    upload_handlers = self.view.request._get_upload_handlers()
    try:
        django_parser = DjangoMultiPartParser(self.view.request.META, stream, upload_handlers)
        return django_parser.parse()
    # Fixed the Python 2-only `except X, exc` syntax (removed in Python 3);
    # `except X as exc` works on Python 2.6+ and 3.
    except MultiPartParserError as exc:
        raise ErrorResponse(status.HTTP_400_BAD_REQUEST,
                            {'detail': 'multipart parse error - %s' % unicode(exc)})
def _document_PUT(request, document_slug, document_locale):
    """Handle PUT requests as document write API"""
    # Try parsing one of the supported content types from the request
    try:
        content_type = request.META.get('CONTENT_TYPE', '')
        if content_type.startswith('application/json'):
            data = json.loads(request.body)
        elif content_type.startswith('multipart/form-data'):
            parser = MultiPartParser(request.META,
                                     StringIO(request.body),
                                     request.upload_handlers,
                                     request.encoding)
            data, files = parser.parse()
        elif content_type.startswith('text/html'):
            # TODO: Refactor this into wiki.content ?
            # First pass: Just assume the request body is an HTML fragment.
            html = request.body
            data = dict(content=html)
            # Second pass: Try parsing the body as a fuller HTML document,
            # and scrape out some of the interesting parts.
            try:
                doc = pq(html)
                head_title = doc.find('head title')
                if head_title.length > 0:
                    data['title'] = head_title.text()
                body_content = doc.find('body')
                if body_content.length > 0:
                    data['content'] = body_content.html()
            # Was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
            # are not swallowed; scraping remains best-effort and falls back
            # to the fragment from the first pass.
            except Exception:
                pass
        else:
            resp = HttpResponse()
            resp.status_code = 400
            resp.content = _("Unsupported content-type: %s") % content_type
            return resp
    # Fixed the Python 2-only `except Exception, e` syntax.
    except Exception as e:
        resp = HttpResponse()
        resp.status_code = 400
        resp.content = _("Request parsing error: %s") % e
        return resp
def _parse_request_data(request):
    """Parse the request body by content type.

    Returns a ``(data, files, error_response)`` triple: ``data`` (and
    ``files`` for multipart) on success, or an ``error_response`` built by
    ``_bad_request`` on failure.
    """
    # Try parsing one of the supported content types from the request
    try:
        content_type = request.META.get("CONTENT_TYPE", "")
        if content_type.startswith("application/json"):
            return (json.loads(request.body), None, None)
        elif content_type.startswith("multipart/form-data"):
            parser = MultiPartParser(request.META,
                                     StringIO(request.body),
                                     request.upload_handlers,
                                     request.encoding)
            data, files = parser.parse()
            return (data, files, None)
        else:
            return (None, None,
                    _bad_request(_("Unsupported content-type: %s") % content_type))
    # Fixed the Python 2-only `except Exception, e` syntax.
    except Exception as e:
        return (None, None, _bad_request(_("Request parsing error: %s") % e))
def parse(self, stream, media_type=None, parser_context=None):
    """
    Returns a DataAndFiles object.

    `.data` will be a `QueryDict` containing all the form parameters.
    `.files` will be a `QueryDict` containing all the form files.

    Raises `ParseError` when the multipart body is malformed.
    """
    parser_context = parser_context or {}
    request = parser_context['request']
    meta = request.META
    upload_handlers = request.upload_handlers
    try:
        parser = DjangoMultiPartParser(meta, stream, upload_handlers)
        data, files = parser.parse()
        return DataAndFiles(data, files)
    # Fixed the Python 2-only `except X, exc` syntax.
    except MultiPartParserError as exc:
        raise ParseError('Multipart form parse error - %s' % unicode(exc))
def parse(self, stream, media_type=None, parser_context=None):
    """
    Returns a DataAndFiles object.

    `.data` will be a `QueryDict` containing all the form parameters.
    `.files` will be a `QueryDict` containing all the form files.

    Raises `ParseError` when the multipart body is malformed.
    """
    parser_context = parser_context or {}
    request = parser_context['request']
    encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
    meta = request.META
    upload_handlers = request.upload_handlers
    try:
        parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
        data, files = parser.parse()
        return DataAndFiles(data, files)
    except MultiPartParserError as exc:
        # BUG FIX: six.u() is for unicode *literals* and calls .replace()
        # on its argument on Python 2, so passing an exception object
        # crashed; six.text_type() is the correct way to stringify.
        raise ParseError('Multipart form parse error - %s' % six.text_type(exc))
def test_bad_type_content_length(self):
    # A non-numeric CONTENT_LENGTH header must be coerced to 0 rather than
    # raising, so the parser can still be constructed.
    multipart_parser = MultiPartParser(
        {
            "CONTENT_TYPE": "multipart/form-data; boundary=_foo",
            "CONTENT_LENGTH": "a",
        },
        StringIO("x"),
        [],
        "utf-8",
    )
    self.assertEqual(multipart_parser._content_length, 0)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse *stream* as a multipart encoded form.

    Returns a ``DataAndFiles`` object whose ``.data`` is a ``QueryDict``
    of form parameters and whose ``.files`` is a ``QueryDict`` of
    uploaded files.
    """
    context = parser_context or {}
    http_request = context["request"]
    charset = context.get("encoding", settings.DEFAULT_CHARSET)
    # Work on a copy of META so the request itself is left untouched,
    # and pin the negotiated media type as the content type.
    environ = http_request.META.copy()
    environ["CONTENT_TYPE"] = media_type
    try:
        form_data, form_files = DjangoMultiPartParser(
            environ, stream, http_request.upload_handlers, charset
        ).parse()
    except MultiPartParserError as exc:
        raise ParseError("Multipart form parse error - %s" % str(exc))
    return DataAndFiles(form_data, form_files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse the incoming bytestream as a multipart encoded form.

    Returns a ``DataAndFiles`` object; ``.data`` is a ``QueryDict`` of
    form parameters and ``.files`` a ``QueryDict`` of form files, both
    with their keys decamelized.
    """
    context = parser_context or {}
    http_request = context['request']
    charset = context.get('encoding', settings.DEFAULT_CHARSET)
    environ = http_request.META.copy()
    environ['CONTENT_TYPE'] = media_type
    try:
        form_data, form_files = DjangoMultiPartParser(
            environ, stream, http_request.upload_handlers, charset
        ).parse()
    except MultiPartParserError as err:
        raise ParseError(f'Multipart form parse error - {err}')
    # Convert camelCase keys to snake_case before handing the data back.
    return DataAndFiles(decamelize(form_data), decamelize(form_files))
def parse(self, stream, media_type=None, parser_context=None):
    """Parse a multipart body whose `data` form field holds JSON.

    Decodes the JSON in the `data` field, then merges each uploaded file
    into that payload at the dotted path given by its field name (via
    `compile_payload`). Returns the resulting dict.

    Raises `ParseError` when the multipart body is malformed.
    """
    parser_context = parser_context or {}
    request = parser_context['request']
    encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
    meta = request.META.copy()
    meta['CONTENT_TYPE'] = media_type
    upload_handlers = request.upload_handlers
    try:
        parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
        data, files = parser.parse()
        result = parsers.DataAndFiles(data, files)
        # Removed a dead `data = {}` assignment that was immediately
        # overwritten by the json.loads() below.
        data = json.loads(result.data["data"])
        for key, value in result.files.items():
            data = compile_payload(data, key.split("."), value)
        return data
    except MultiPartParserError as exc:
        raise ParseError('Multipart form parse error - %s' % six.text_type(exc))
def parse_file_upload(self, META, post_data): """Return a tuple of (POST QueryDict, FILES MultiValueDict).""" # self 是「请求对象」 self.upload_handlers = ImmutableList( self.upload_handlers, warning= "You cannot alter upload handlers after the upload has been processed." ) print('【django.http.request.HttpRequest.parse_file_upload】' '「请求对象」创建「请求表单解析对象」并调用其 parse 方法') # 此类定义在 django.http.multipartparser 模块中,其实例被称为「请求表单解析对象」 # META 是包含请求头信息的字典对象 # post_data 是 self ,也就是「请求对象」 # self.upload_handlers 是元组,里面是一些文件上传相关的对象 # self.encoding 是字符串或 None TODO parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) # 调用「请求表单解析对象」的 parse 方法 # 处理请求体中的表单数据和文件数据,生成两个类字典对象并返回 return parser.parse()
def test_negative_content_length(self):
    # Constructing the parser with a negative CONTENT_LENGTH must fail
    # immediately with a descriptive error.
    headers = {
        "CONTENT_TYPE": "multipart/form-data; boundary=_foo",
        "CONTENT_LENGTH": -1,
    }
    with self.assertRaisesMessage(MultiPartParserError, "Invalid content length: -1"):
        MultiPartParser(headers, StringIO("x"), [], "utf-8")
def test_invalid_content_type(self):
    # A non-multipart content type must be rejected at construction time.
    headers = {
        "CONTENT_TYPE": "text/plain",
        "CONTENT_LENGTH": "1",
    }
    with self.assertRaisesMessage(MultiPartParserError, "Invalid Content-Type: text/plain"):
        MultiPartParser(headers, StringIO("x"), [], "utf-8")
def test_empty_upload_handlers(self):
    # We're not actually parsing here; just checking that the parser can
    # be instantiated with an empty list of upload handlers.
    headers = {
        "CONTENT_TYPE": "multipart/form-data; boundary=_foo",
        "CONTENT_LENGTH": "1",
    }
    MultiPartParser(headers, StringIO("x"), [], "utf-8")
def parse_multipart_data(request):
    """
    Parse a request with multipart data.

    :param request: A HttpRequest instance.
    :returns: the ``(QueryDict, MultiValueDict)`` pair from
        ``MultiPartParser.parse()``.
    """
    # ``request.body`` is bytes on Python 3 / modern Django, so it must be
    # wrapped in BytesIO; StringIO only accepts text and raised TypeError.
    from io import BytesIO
    return MultiPartParser(
        META=request.META,
        input_data=BytesIO(request.body),
        upload_handlers=request.upload_handlers,
        encoding=request.encoding
    ).parse()
def _parse_request_data(request):
    """Parse the request body by content type.

    Returns a ``(data, files, error_response)`` triple: ``data`` (and
    ``files`` for multipart) on success, or an ``error_response`` built by
    ``_bad_request`` on failure.
    """
    # Try parsing one of the supported content types from the request
    try:
        content_type = request.META.get('CONTENT_TYPE', '')
        if content_type.startswith('application/json'):
            return (json.loads(request.body), None, None)
        elif content_type.startswith('multipart/form-data'):
            parser = MultiPartParser(request.META,
                                     StringIO(request.body),
                                     request.upload_handlers,
                                     request.encoding)
            data, files = parser.parse()
            return (data, files, None)
        else:
            return (None, None, _bad_request(
                _("Unsupported content-type: %s") % content_type))
    # Fixed the Python 2-only `except Exception, e` syntax.
    except Exception as e:
        return (None, None, _bad_request(_("Request parsing error: %s") % e))
def parse_body(r, request):
    """Populate dict ``r`` from ``request``'s body and return it.

    multipart/form-data -> form params + files; multipart/mixed ->
    attachments; otherwise the raw body (which must be non-empty,
    else ``BadRequest`` is raised).
    """
    if request.method == 'POST' or request.method == 'PUT':
        # Parse out profiles/states if the POST dict is not empty
        if 'multipart/form-data' in request.META['CONTENT_TYPE']:
            if request.POST.dict().keys():
                r['params'].update(request.POST.dict())
            # NOTE(review): ``StringIO.StringIO`` and ``raw_post_data`` are
            # Python 2 / old-Django APIs -- confirm target versions.
            parser = MultiPartParser(request.META, StringIO.StringIO(request.raw_post_data), request.upload_handlers)
            post, files = parser.parse()
            r['files'] = files
        # If it is multipart/mixed, parse out all data
        elif 'multipart/mixed' in request.META['CONTENT_TYPE']:
            parse_attachment(r, request)
        # Normal POST/PUT data
        else:
            if request.body:
                # profile uses the request body
                r['raw_body'] = request.body
                # Body will be some type of string, not necessarily JSON
                r['body'] = convert_to_dict(request.body)
            else:
                raise BadRequest("No body in request")
    return r
def __init__(self, request):
    """Wrap a Django request, extracting headers, URL and body data.

    Populates ``headers``, ``url_parameters``, ``body_parameters`` and
    ``body_files`` according to the request's method and content type.
    """
    super().__init__(request.environ)
    self._request = request
    # NOTE(review): ``"HTTP_" in header_key`` matches the substring
    # anywhere in the key, not just the prefix -- ``startswith("HTTP_")``
    # is probably what was intended; confirm.
    self.headers = {
        header_key: header_value
        for header_key, header_value in self._request.META.items()
        if "HTTP_" in header_key
    }
    self.url_parameters = self._request.GET.dict()
    self.body_parameters = {}
    self.body_files = {}
    if self._request.method == BaseRequest.NonSimpleMethod.POST.value or self._request.method == BaseRequest.NonSimpleMethod.PUT.value:
        if self._request.content_type == BaseRequest.ContentType.MULTIPART.value:
            parameters, files = MultiPartParser(
                self._request.META, self._request,
                self._request.upload_handlers).parse()
            # Strip stray multipart boundary fragments from field values.
            self.body_parameters = {
                key: value.replace("\r\n--", "")
                for key, value in parameters.items()
            }
            self.body_files = files.dict()
            popable_items = []
            # JSON-typed file parts are decoded and promoted to body
            # parameters instead of staying in body_files.
            for file_name, file in self.body_files.items():
                if file.content_type == BaseRequest.ContentType.JSON.value:
                    file_content = file.read().decode("utf-8")
                    dict_from_content = json.loads(file_content)
                    self.body_parameters[file_name] = dict_from_content
                    popable_items.append(file_name)
            for item in popable_items:
                del self.body_files[item]
        elif self._request.content_type == BaseRequest.ContentType.TEXTPLAIN.value:
            # Plain-text bodies are exposed as a single anonymous file.
            file = ContentFile(request.body)
            self.body_files = {"empty": file}
        elif self._request.content_type == BaseRequest.ContentType.URLENCODED.value:
            self.body_parameters = QueryDict(request.body).dict()
        elif self._request.content_type == BaseRequest.ContentType.JSON.value:
            self.body_parameters = json.loads(request.body)
def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
    """Populate request.POST/FILES for non-POST (PATCH/PUT) bodies."""
    # Adapted from django/http/__init__.py.
    # So by default Django doesn't populate request.POST for anything besides
    # POST requests. We want this dict populated for PATCH/PUT, so we have to
    # do it ourselves.
    #
    # This will not be required in the future, a bug will be filed against
    # Django upstream.
    if not request.POST:
        # Only take action if POST is empty.
        if request.content_type == "multipart/form-data":
            POST, _files = MultiPartParser(
                request.META,
                BytesIO(request.body),
                request.upload_handlers,
                request.encoding,
            ).parse()
            # request.POST is an immutable QueryDict in most cases, while
            # MultiPartParser.parse() returns a mutable instance of QueryDict.
            # This can be fixed when https://code.djangoproject.com/ticket/17235
            # is resolved.
            # django-stubs makes QueryDict of different mutabilities incompatible
            # types. There is no way to acknowledge the django-stubs mypy plugin
            # the change of POST's mutability, so we bypass the check with cast.
            # See also: https://github.com/typeddjango/django-stubs/pull/925#issue-1206399444
            POST._mutable = False
            request.POST = cast("_ImmutableQueryDict", POST)
            # Note that request._files is just the private attribute that backs the
            # FILES property, so we are essentially setting request.FILES here. (In
            # Django 3.2 FILES was still a read-only property.)
            setattr(request, "_files", _files)
        elif request.content_type == "application/x-www-form-urlencoded":
            request.POST = QueryDict(request.body, encoding=request.encoding)
    return view_func(request, *args, **kwargs)
def process_request(self,req): """Interpret POST variables that indicate fake file uploads.""" # Bail out if any real files were uploaded if len(req.FILES) > 0: return None # Find any post variables named like "fakefile_*". # These contain the fake files that are to be uploaded. fakefiles = [] for (k,v) in req.POST.iteritems(): if k.startswith(self.field_name): if v == "": continue fakefiles.append((k[len(self.field_name):],self.file_spec[v])) if not fakefiles: return None # Remove the fakefile keys from POST for f in fakefiles: del req.POST[self.field_name + f[0]] # Construct a fake request body and META object fake_data = FakeFilesData(fakefiles) fake_meta = MergeDict(fake_data.META,req.META) # Re-parse the fake data, triggering upload handlers etc. parser = MultiPartParser(fake_meta,fake_data,req.upload_handlers,req.encoding) (_, req._files) = parser.parse()
def parse_multipart_data(request):
    """Parse a multipart request body.

    Wraps ``request.body`` in the appropriate in-memory stream (BytesIO
    for bytes, StringIO for text) and hands it to ``MultiPartParser``.
    Returns the parser's ``(data, files)`` result, or ``None`` when the
    body is neither bytes nor text.
    """
    body = None
    if isinstance(request.body, (bytes, bytearray)):
        body = BytesIO(request.body)
    # BUG FIX: the second branch previously repeated the exact same
    # (bytes, bytearray) check and was unreachable; text bodies belong
    # in a StringIO.
    elif isinstance(request.body, str):
        body = StringIO(request.body)
    if body:
        return MultiPartParser(META=request.META,
                               input_data=body,
                               upload_handlers=request.upload_handlers,
                               encoding=request.encoding).parse()
    else:
        return None
def process_request(self, req): """Interpret POST variables that indicate fake file uploads.""" # Bail out if any real files were uploaded if len(req.FILES) > 0: return None # Find any post variables named like "fakefile_*". # These contain the fake files that are to be uploaded. fakefiles = [] for (k, v) in req.POST.iteritems(): if k.startswith(self.field_name): if v == "": continue fakefiles.append((k[len(self.field_name):], self.file_spec[v])) if not fakefiles: return None # Remove the fakefile keys from POST for f in fakefiles: del req.POST[self.field_name + f[0]] # Construct a fake request body and META object fake_data = FakeFilesData(fakefiles) fake_meta = MergeDict(fake_data.META, req.META) # Re-parse the fake data, triggering upload handlers etc. parser = MultiPartParser(fake_meta, fake_data, req.upload_handlers, req.encoding) (_, req._files) = parser.parse()
def parse(self, stream, media_type=None, parser_context=None):
    """Parse the incoming bytestream.

    Parses the incoming bytestream as a multipart encoded form and
    returns a DataAndFiles object.

    `.data` will be a plain dict of all the form parameters, JSON
    decoded where available.
    `.files` will be a `QueryDict` containing all the form files.

    :param (bytes) stream: Incoming byte stream.
    :param (str) media_type: Media Type.
    :param (dict) parser_context: Context.
    """
    context = parser_context or {}
    http_request = context["request"]
    charset = context.get("encoding", settings.DEFAULT_CHARSET)
    environ = http_request.META.copy()
    environ["CONTENT_TYPE"] = media_type
    try:
        form_data, form_files = DjangoMultiPartParser(
            environ, stream, http_request.upload_handlers, charset
        ).parse()
    except MultiPartParserError as exc:
        raise ParseError("Multipart form parse error - %s" % str(exc))
    # Copy into a plain (mutable) dict and JSON-decode every value that
    # parses; anything that is not valid JSON is kept verbatim.
    decoded = form_data.dict()
    for field in decoded:
        if decoded[field]:
            try:
                decoded[field] = json.loads(decoded[field])
            except ValueError:
                pass
    return DataAndFiles(decoded, form_files)
def parse_body(r, request):
    """Populate dict ``r`` from ``request``'s body and return it.

    multipart/form-data -> form params + files; multipart/mixed ->
    attachments; otherwise the raw body (which must be non-empty,
    else ``BadRequest`` is raised).
    """
    if request.method == 'POST' or request.method == 'PUT':
        # Parse out profiles/states if the POST dict is not empty
        if 'multipart/form-data' in request.META['CONTENT_TYPE']:
            if request.POST.dict().keys():
                r['params'].update(request.POST.dict())
            # NOTE(review): ``StringIO.StringIO`` and ``raw_post_data`` are
            # Python 2 / old-Django APIs -- confirm target versions.
            parser = MultiPartParser(
                request.META,
                StringIO.StringIO(request.raw_post_data),
                request.upload_handlers)
            post, files = parser.parse()
            r['files'] = files
        # If it is multipart/mixed, parse out all data
        elif 'multipart/mixed' in request.META['CONTENT_TYPE']:
            parse_attachment(r, request)
        # Normal POST/PUT data
        else:
            if request.body:
                # profile uses the request body
                r['raw_body'] = request.body
                # Body will be some type of string, not necessarily JSON
                r['body'] = convert_to_dict(request.body)
            else:
                raise BadRequest("No body in request")
    return r
def __call__(self, request):
    """Middleware: decode JSON bodies into POST and parse PUT bodies.

    - If ``request.POST`` exists but is empty, best-effort decode the raw
      body as JSON and use it as POST.
    - For PUT requests, parse the multipart body into ``request.PUT`` and
      merge any uploaded files into ``request.FILES``.
    """
    if hasattr(request, 'POST') and not request.POST:
        try:
            request.POST = ujson.loads(request.body)
        # NOTE(review): the broad except makes this deliberately
        # best-effort (non-JSON bodies pass through), but it also hides
        # real decoding bugs -- confirm this is intended.
        except Exception:
            pass
    if request.method == 'PUT':
        try:
            body_tuple = MultiPartParser(request.META, request, request.upload_handlers).parse()
            request.PUT = body_tuple[0]
            request.FILES.update(body_tuple[1])
        # Same best-effort pattern: a non-multipart PUT simply leaves
        # request.PUT unset.
        except Exception:
            pass
    response = self.get_response(request)
    return response
def pessoa(request,pessoaid):
    """CRUD view for a single Pessoa (person), dispatched on HTTP method."""
    if (request.method == 'GET'):
        # Fetch the requested person from the database.
        db_pessoa = Pessoa.objects.aperson(pessoaid=pessoaid).values()
        # Build the payload containing the person.
        payload = { 'pessoa' : list(db_pessoa) }
        # JSON response alternative:
        #return JsonResponse(payload)
        # Render via template instead.
        template = loader.get_template('pessoa/detalharpessoa.html')
        return HttpResponse(template.render(payload, request))
    if (request.method == 'DELETE'):
        db_pessoa = Pessoa.objects.aperson(pessoaid=pessoaid)
        db_pessoa.delete()
        return HttpResponse("Excluido com sucesso!")
    if (request.method == 'PUT'):
        # Data sent via Postman - Body - form-data.
        put_data = MultiPartParser(request.META, request, request.upload_handlers).parse()
        alldata = put_data[0]
        # thanks to: https://stackoverflow.com/questions/44927998/how-to-access-data-form-in-put-request-of-class-based-views-in-django
        # Fetch the person from the database.
        db_pessoa = Pessoa.objects.get(id=pessoaid)
        # Update fields from the request parameters (defaulting to "0").
        db_pessoa.nome = alldata.get("nome", "0")
        db_pessoa.sobrenome = alldata.get("sobrenome", "0")
        db_pessoa.idade = alldata.get("idade", "0")
        db_pessoa.cpf = alldata.get("cpf","0")
        db_pessoa.sexo = alldata.get("sexo","0")
        db_pessoa.deptoatual_id = alldata.get("depto_atual","0")
        # Save the updated person.
        db_pessoa.save()
        return HttpResponse("Atualizado com sucesso!")
def _check(request, *args, **kwargs):
    # Pull user_id from the query string, the form body, or -- for any
    # other method (PUT/PATCH/...) -- from a freshly parsed multipart body.
    if request.method == 'GET':
        user_id = request.GET.get('user_id')
    elif request.method == 'POST':
        user_id = request.POST.get('user_id')
    else:
        parsed, _files = MultiPartParser(
            request.META, request, request.upload_handlers).parse()
        user_id = parsed['user_id']
    try:
        Users.objects.get(user_id=user_id)
    except Exception as e:
        # Unknown user (or any lookup failure) is reported to the client.
        return JsonResponse({
            'code': status.HTTP_500_INTERNAL_SERVER_ERROR,
            'err_msg': f'{e}'
        })
    return fn(request, *args, **kwargs)
def process_request(self, request):
    """Normalize the request: decode the body by content type and honour
    the HTTP_X_METHOD method-override header.

    Leaves the parsed data on ``request.<METHOD>`` and any uploaded files
    on ``request.<METHOD>_FILES``; returns an error response only when a
    JSON body fails to decode.
    """
    method = request.method
    if 'application/json' in request.META['CONTENT_TYPE']:
        try:
            data = json.loads(request.body.decode())
            # BUG FIX: ``files`` must be bound on this path too, otherwise
            # the ``if files:`` below raised NameError.
            files = None
        except Exception as e:
            return parma_error({'body': 'request data type is error'})
    elif 'multipart/form-data' in request.META['CONTENT_TYPE']:
        data, files = MultiPartParser(request.META, request,
                                      request.upload_handlers).parse()
    else:
        data = request.GET
        files = None
    if 'HTTP_X_METHOD' in request.META:
        # BUG FIX: META is a dict and must be indexed, not called --
        # ``request.META('HTTP_X_METHOD')`` raised TypeError.
        method = request.META['HTTP_X_METHOD'].upper()
        setattr(request, 'method', method)
    if files:
        setattr(request, '{method}_FILES'.format(method=method), files)
    setattr(request, method, data)
def _wrapped_view_func(request, *args, **kwargs):
    """Populate request.POST/FILES for non-POST (PATCH/PUT) bodies."""
    # Adapted from django/http/__init__.py.
    # So by default Django doesn't populate request.POST for anything besides
    # POST requests. We want this dict populated for PATCH/PUT, so we have to
    # do it ourselves.
    #
    # This will not be required in the future, a bug will be filed against
    # Django upstream.
    if not request.POST:
        # Only take action if POST is empty.
        if request.META.get('CONTENT_TYPE', '').startswith('multipart'):
            # Note that request._files is just the private attribute that backs the
            # FILES property, so we are essentially setting request.FILES here. (In
            # Django 1.5 FILES was still a read-only property.)
            # NOTE(review): StringIO(request.body) assumes a text body
            # (Python 2 era); on Python 3 bodies are bytes -- confirm.
            request.POST, request._files = MultiPartParser(request.META,
                                                           StringIO(request.body),
                                                           request.upload_handlers,
                                                           request.encoding).parse()
        else:
            request.POST = QueryDict(request.body, encoding=request.encoding)
    return view_func(request, *args, **kwargs)
def addImg(request):
    """Handle an album-image upload: validate type/size, save an Albums row."""
    # Parse the multipart body manually so POST/FILES are populated
    # regardless of the request method.
    request.POST, request._files = MultiPartParser(
        request.META,
        BytesIO(request.body),
        request.upload_handlers,
        request.encoding,
    ).parse()
    title = request.POST.get("title")
    discription = request.POST.get("discription")
    file = request.FILES.get("file")
    if isinstance(file, UploadedFile):
        # Sniff the real MIME type from the file contents, not the
        # client-supplied name or headers.
        file_type = magic.from_buffer(file.read(), mime=True)
        if not file_type in ["image/gif", "image/png", "image/jpeg"]:
            return JsonResponse({
                "status": "failed",
                "text": "File Type Not Supported"
            })
        # 1 MB upload limit.
        if file.size > 1000000:
            return JsonResponse({
                "status": "failed",
                "text": "File Is Bigger Than 1Mg"
            })
        newAlbum = Albums(Title=title, img=file, description=discription)
        newAlbum.save()
        return JsonResponse({
            "status": "success",
            "text": "Your Album Saved Successfully"
        })
    else:
        return JsonResponse({
            "status": "failed",
            "text": "Something Went Wrong, Try Again Later Plz"
        })
def process_request(self, request):
    """Decode the request body by content type and apply the
    HTTP_X_METHOD method-override header.

    The parsed payload ends up on ``request.<METHOD>`` and uploaded
    files on ``request.<METHOD>_FILES``.
    """
    method = request.method
    content_type = request.content_type
    if "application/json" in content_type:
        try:
            data = json.loads(request.body.decode())
        except Exception as e:
            # Malformed JSON: short-circuit with a 422 response.
            return HttpResponse(json.dumps({
                "status": 422,
                "msg": "参数错误",
            }), content_type="application/json")
        files = None
    elif "multipart/form-data" in content_type:
        data, files = MultiPartParser(
            request.META, request, request.upload_handlers).parse()
    else:
        data, files = request.GET, None
    if "HTTP_X_METHOD" in request.META:
        method = request.META["HTTP_X_METHOD"].upper()
        setattr(request, "method", method)
    if files:
        setattr(request, "{method}_FILES".format(method=method), files)
    setattr(request, method, data)
def process_request(self, request):
    """Decode the body by content type and honour HTTP_X_METHOD."""
    method = request.method
    if 'application/json' in request.content_type:
        # Convert the client's JSON payload into a Python dict.
        try:
            data = json.loads(request.body.decode())
            files = None
        except Exception as e:
            # Malformed JSON: return an error response immediately.
            return params_error({'body': '请求的数据类型不正确'})
    elif 'multipart/form-data' in request.content_type:
        data, files = MultiPartParser(request.META, request, request.upload_handlers).parse()
    else:
        data = request.GET
        files = None
    if 'HTTP_X_METHOD' in request.META:
        # A client-supplied override replaces the real HTTP method.
        method = request.META['HTTP_X_METHOD'].upper()
        setattr(request, 'method', method)
    if files:
        setattr(request, '{method}_FILES'.format(method=method), files)
    # Expose the parsed payload as request.<METHOD> (e.g. request.PUT).
    setattr(request, method, data)
from __future__ import unicode_literals
def post(self, request, project, project_config, **kwargs):
    """Ingest a native crash report (minidump) submission.

    Accepts either a raw minidump as the request body or a multipart
    formdata request with an `upload_file_minidump` field, extracts event
    data and attachments, kicks off processing, and returns the new
    event's UUID as plain text.
    """
    # Minidump request payloads do not have the same structure as usual
    # events from other SDKs. The minidump can either be transmitted as
    # request body, or as `upload_file_minidump` in a multipart formdata
    # request. Optionally, an event payload can be sent in the `sentry` form
    # field, either as JSON or as nested form data.
    request_files = request.FILES or {}
    content_type = request.META.get("CONTENT_TYPE")
    if content_type in self.dump_types:
        minidump = io.BytesIO(request.body)
        minidump_name = "Minidump"
        data = {}
    else:
        minidump = request_files.get("upload_file_minidump")
        minidump_name = minidump and minidump.name or None
        if any(key.startswith("sentry[") for key in request.POST):
            # First, try to parse the nested form syntax `sentry[key][key]`
            # This is required for the Breakpad client library, which only
            # supports string values of up to 64 characters.
            extra = parser.parse(request.POST.urlencode())
            data = extra.pop("sentry", {})
        else:
            # Custom clients can submit longer payloads and should JSON
            # encode event data into the optional `sentry` field.
            extra = request.POST.dict()
            json_data = extra.pop("sentry", None)
            try:
                data = json.loads(json_data) if json_data else {}
            except ValueError:
                data = {}
        if not isinstance(data, dict):
            data = {}
        # Merge additional form fields from the request with `extra` data
        # from the event payload and set defaults for processing. This is
        # sent by clients like Breakpad or Crashpad.
        extra.update(data.get("extra", {}))
        data["extra"] = extra
    if not minidump:
        track_outcome(
            project_config.organization_id,
            project_config.project_id,
            None,
            Outcome.INVALID,
            "missing_minidump_upload",
        )
        raise APIError("Missing minidump upload")
    # Breakpad on linux sometimes stores the entire HTTP request body as
    # dump file instead of just the minidump. The Electron SDK then for
    # example uploads a multipart formdata body inside the minidump file.
    # It needs to be re-parsed, to extract the actual minidump before
    # continuing.
    minidump.seek(0)
    if minidump.read(2) == b"--":
        # The remaining bytes of the first line are the form boundary. We
        # have already read two bytes, the remainder is the form boundary
        # (excluding the initial '--').
        boundary = minidump.readline().rstrip()
        minidump.seek(0)
        # Next, we have to fake a HTTP request by specifying the form
        # boundary and the content length, or otherwise Django will not try
        # to parse our form body. Also, we need to supply new upload
        # handlers since they cannot be reused from the current request.
        meta = {
            "CONTENT_TYPE": b"multipart/form-data; boundary=%s" % boundary,
            "CONTENT_LENGTH": minidump.size,
        }
        handlers = [
            uploadhandler.load_handler(handler, request)
            for handler in settings.FILE_UPLOAD_HANDLERS
        ]
        _, inner_files = MultiPartParser(meta, minidump, handlers).parse()
        try:
            minidump = inner_files["upload_file_minidump"]
            minidump_name = minidump.name
        except KeyError:
            track_outcome(
                project_config.organization_id,
                project_config.project_id,
                None,
                Outcome.INVALID,
                "missing_minidump_upload",
            )
            raise APIError("Missing minidump upload")
    minidump.seek(0)
    # NOTE(review): on Python 3 ``minidump.read(4)`` yields bytes, which
    # never equals the str "MDMP", so every upload would be rejected --
    # this comparison looks Python 2 specific; confirm.
    if minidump.read(4) != "MDMP":
        track_outcome(
            project_config.organization_id,
            project_config.project_id,
            None,
            Outcome.INVALID,
            "invalid_minidump",
        )
        raise APIError("Uploaded file was not a minidump")
    # Always store the minidump in attachments so we can access it during
    # processing, regardless of the event-attachments feature. This is
    # required to process the minidump with debug information.
    attachments = []
    # The minidump attachment is special. It has its own attachment type to
    # distinguish it from regular attachments for processing. Also, it might
    # not be part of `request_files` if it has been uploaded as raw request
    # body instead of a multipart formdata request.
    minidump.seek(0)
    attachments.append(
        CachedAttachment(
            name=minidump_name,
            content_type="application/octet-stream",
            data=minidump.read(),
            type=MINIDUMP_ATTACHMENT_TYPE,
        )
    )
    # Append all other files as generic attachments.
    # RaduW 4 Jun 2019 always sent attachments for minidump (does not use
    # event-attachments feature)
    for name, file in six.iteritems(request_files):
        if name == "upload_file_minidump":
            continue
        # Known attachment: msgpack event
        if name == "__sentry-event":
            merge_attached_event(file, data)
            continue
        if name in ("__sentry-breadcrumb1", "__sentry-breadcrumb2"):
            merge_attached_breadcrumbs(file, data)
            continue
        # Add any other file as attachment
        attachments.append(CachedAttachment.from_upload(file))
    # Assign our own UUID so we can track this minidump. We cannot trust
    # the uploaded filename, and if reading the minidump fails there is
    # no way we can ever retrieve the original UUID from the minidump.
    event_id = data.get("event_id") or uuid.uuid4().hex
    data["event_id"] = event_id
    # Write a minimal event payload that is required to kick off native
    # event processing. It is also used as fallback if processing of the
    # minidump fails.
    # NB: This occurs after merging attachments to overwrite potentially
    # contradicting payloads transmitted in __sentry_event.
    write_minidump_placeholder(data)
    event_id = self.process(
        request,
        attachments=attachments,
        data=data,
        project=project,
        project_config=project_config,
        **kwargs
    )
    # Return the formatted UUID of the generated event. This is
    # expected by the Electron http uploader on Linux and doesn't
    # break the default Breakpad client library.
    return HttpResponse(six.text_type(uuid.UUID(event_id)), content_type="text/plain")
def post(self, request, project, **kwargs):
    """Handle a minidump crash-report upload for `project`.

    Extracts event payload data from the multipart form, unwraps a
    possibly double-wrapped minidump file, optionally caches it to disk,
    runs initial minidump processing, and hands the result to
    ``self.process``.

    :param request: the Django HTTP request (multipart form POST).
    :param project: the project the crash report belongs to.
    :returns: an :class:`HttpResponse` containing the formatted event
        UUID as plain text, or whatever response ``self.process``
        short-circuits with.
    :raises APIError: if the minidump upload is missing or empty, or if
        minidump processing fails.
    """
    # Minidump request payloads do not have the same structure as
    # usual events from other SDKs. Most notably, the event needs
    # to be transfered in the `sentry` form field. All other form
    # fields are assumed "extra" information. The only exception
    # to this is `upload_file_minidump`, which contains the minidump.

    if any(key.startswith('sentry[') for key in request.POST):
        # First, try to parse the nested form syntax `sentry[key][key]`
        # This is required for the Breakpad client library, which only
        # supports string values of up to 64 characters.
        extra = parser.parse(request.POST.urlencode())
        data = extra.pop('sentry', {})
    else:
        # Custom clients can submit longer payloads and should JSON
        # encode event data into the optional `sentry` field.
        # NOTE(review): request.POST QueryDicts are usually immutable in
        # Django; confirm that `.pop` is legal here for this request type.
        extra = request.POST
        json_data = extra.pop('sentry', None)
        # QueryDict.pop presumably returns the list of values for the
        # key, hence the `[0]` index — verify against the Django version
        # in use.
        data = json.loads(json_data[0]) if json_data else {}

    # Merge additional form fields from the request with `extra`
    # data from the event payload and set defaults for processing.
    extra.update(data.get('extra', {}))
    data['extra'] = extra

    # Assign our own UUID so we can track this minidump. We cannot trust the
    # uploaded filename, and if reading the minidump fails there is no way
    # we can ever retrieve the original UUID from the minidump.
    event_id = data.get('event_id') or uuid.uuid4().hex
    data['event_id'] = event_id

    # At this point, we only extract the bare minimum information
    # needed to continue processing. This requires to process the
    # minidump without symbols and CFI to obtain an initial stack
    # trace (most likely via stack scanning). If all validations
    # pass, the event will be inserted into the database.
    try:
        minidump = request.FILES['upload_file_minidump']
    except KeyError:
        raise APIError('Missing minidump upload')

    # Breakpad on linux sometimes stores the entire HTTP request body as
    # dump file instead of just the minidump. The Electron SDK then for
    # example uploads a multipart formdata body inside the minidump file.
    # It needs to be re-parsed, to extract the actual minidump before
    # continuing.
    minidump.seek(0)
    if minidump.read(2) == b'--':
        # The remaining bytes of the first line are the form boundary. We
        # have already read two bytes, the remainder is the form boundary
        # (excluding the initial '--').
        boundary = minidump.readline().rstrip()
        minidump.seek(0)

        # Next, we have to fake a HTTP request by specifying the form
        # boundary and the content length, or otherwise Django will not try
        # to parse our form body. Also, we need to supply new upload
        # handlers since they cannot be reused from the current request.
        meta = {
            'CONTENT_TYPE': b'multipart/form-data; boundary=%s' % boundary,
            'CONTENT_LENGTH': minidump.size,
        }
        handlers = [
            uploadhandler.load_handler(handler, request)
            for handler in settings.FILE_UPLOAD_HANDLERS
        ]

        # Re-parse the wrapped body; only the files of the inner form
        # matter, the inner POST data is discarded.
        _, files = MultiPartParser(meta, minidump, handlers).parse()
        try:
            minidump = files['upload_file_minidump']
        except KeyError:
            raise APIError('Missing minidump upload')

    if minidump.size == 0:
        raise APIError('Empty minidump upload received')

    # Optionally persist the raw dump to local disk for debugging /
    # reprocessing, keyed by the event id assigned above.
    if settings.SENTRY_MINIDUMP_CACHE:
        if not os.path.exists(settings.SENTRY_MINIDUMP_PATH):
            os.mkdir(settings.SENTRY_MINIDUMP_PATH, 0o744)

        with open('%s/%s.dmp' % (settings.SENTRY_MINIDUMP_PATH, event_id), 'wb') as out:
            for chunk in minidump.chunks():
                out.write(chunk)

    # Always store the minidump in attachments so we can access it during
    # processing, regardless of the event-attachments feature. This will
    # allow us to stack walk again with CFI once symbols are loaded.
    attachments = []
    # Rewind first: the boundary sniffing above consumed bytes.
    minidump.seek(0)
    attachments.append(
        CachedAttachment.from_upload(minidump, type=MINIDUMP_ATTACHMENT_TYPE))

    # Append all other files as generic attachments. We can skip this if the
    # feature is disabled since they won't be saved.
    if features.has('organizations:event-attachments', project.organization, actor=request.user):
        for name, file in six.iteritems(request.FILES):
            if name != 'upload_file_minidump':
                attachments.append(CachedAttachment.from_upload(file))

    # Run the initial (symbol-less) processing pass and merge the
    # resulting stack traces into the event payload.
    try:
        state = process_minidump(minidump)
        merge_process_state_event(data, state)
    except ProcessMinidumpError as e:
        minidumps_logger.exception(e)
        # Only surface the first line of the processing error to the client.
        raise APIError(e.message.split('\n', 1)[0])

    response_or_event_id = self.process(
        request, attachments=attachments, data=data, project=project, **kwargs)
    if isinstance(response_or_event_id, HttpResponse):
        return response_or_event_id

    # Return the formatted UUID of the generated event. This is
    # expected by the Electron http uploader on Linux and doesn't
    # break the default Breakpad client library.
    return HttpResponse(
        six.text_type(uuid.UUID(response_or_event_id)),
        content_type='text/plain')
def test_bad_type_content_length(self):
    """A non-numeric CONTENT_LENGTH header is coerced to a length of 0."""
    meta = {
        'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
        'CONTENT_LENGTH': 'a',
    }
    parser = MultiPartParser(meta, StringIO('x'), [], 'utf-8')
    self.assertEqual(parser._content_length, 0)
def test_negative_content_length(self):
    """A negative CONTENT_LENGTH is rejected with MultiPartParserError."""
    meta = {
        'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
        'CONTENT_LENGTH': -1,
    }
    message = 'Invalid content length: -1'
    with self.assertRaisesMessage(MultiPartParserError, message):
        MultiPartParser(meta, StringIO('x'), [], 'utf-8')
def test_invalid_content_type(self):
    """A non-multipart Content-Type is rejected with MultiPartParserError."""
    meta = {
        'CONTENT_TYPE': 'text/plain',
        'CONTENT_LENGTH': '1',
    }
    message = 'Invalid Content-Type: text/plain'
    with self.assertRaisesMessage(MultiPartParserError, message):
        MultiPartParser(meta, StringIO('x'), [], 'utf-8')
def _document_PUT(request, document_slug, document_locale):
    """Handle PUT requests as document write API.

    Parses the request body (JSON, multipart form-data, or raw HTML),
    then either revises the existing document at
    (`document_locale`, `document_slug`) or creates a new one.

    :returns: 400 on unsupported content type or body parse errors,
        412 on an ETag precondition failure, 205 ("RESET") after
        revising an existing document, 201 ("CREATED", with a
        `Location` header) after creating a new one.
    :raises PermissionDenied: if the user may not revise the existing
        document or may not add a new one at this slug.
    """
    # Try parsing one of the supported content types from the request
    try:
        content_type = request.META.get('CONTENT_TYPE', '')
        if content_type.startswith('application/json'):
            data = json.loads(request.body)
        elif content_type.startswith('multipart/form-data'):
            parser = MultiPartParser(request.META,
                                     StringIO(request.body),
                                     request.upload_handlers,
                                     request.encoding)
            data, files = parser.parse()
        elif content_type.startswith('text/html'):
            # TODO: Refactor this into wiki.content ?
            # First pass: Just assume the request body is an HTML fragment.
            html = request.body
            data = dict(content=html)
            # Second pass: Try parsing the body as a fuller HTML document,
            # and scrape out some of the interesting parts.
            # NOTE(review): the broad except below is a deliberate
            # best-effort — if full-document parsing fails we keep the
            # fragment-only `data` from the first pass.
            try:
                doc = pq(html)
                head_title = doc.find('head title')
                if head_title.length > 0:
                    data['title'] = head_title.text()
                body_content = doc.find('body')
                if body_content.length > 0:
                    data['content'] = body_content.html()
            except Exception:
                pass
        else:
            resp = HttpResponse()
            resp.status_code = 400
            resp.content = ugettext(
                "Unsupported content-type: %s") % content_type
            return resp
    except Exception as e:
        # Any failure while parsing the body is reported back as a 400.
        resp = HttpResponse()
        resp.status_code = 400
        resp.content = ugettext("Request parsing error: %s") % e
        return resp
    try:
        # Look for existing document to edit:
        doc = Document.objects.get(locale=document_locale,
                                   slug=document_slug)
        if not doc.allows_revision_by(request.user):
            raise PermissionDenied
        section_id = request.GET.get('section', None)
        is_new = False
        # Use ETags to detect mid-air edit collision
        # see: http://www.w3.org/1999/04/Editing/
        expected_etag = request.META.get('HTTP_IF_MATCH', False)
        if expected_etag:
            curr_etag = doc.calculate_etag(section_id)
            if curr_etag != expected_etag:
                resp = HttpResponse()
                resp.status_code = 412
                resp.content = ugettext('ETag precondition failed')
                return resp
    except Document.DoesNotExist:
        # No existing document, so this is an attempt to create a new
        # one...
        if not Document.objects.allows_add_by(request.user, document_slug):
            raise PermissionDenied
        # TODO: There should be a model utility for creating a doc...
        # Let's see if this slug path implies a parent...
        slug_parts = split_slug(document_slug)
        if not slug_parts['parent']:
            # Apparently, this is a root page!
            parent_doc = None
        else:
            # There's a parent implied, so make sure we can find it.
            parent_doc = get_object_or_404(Document, locale=document_locale,
                                           slug=slug_parts['parent'])
        # Create and save the new document; we'll revise it immediately.
        doc = Document(slug=document_slug, locale=document_locale,
                       title=data.get('title', document_slug),
                       parent_topic=parent_doc)
        doc.save()
        section_id = None  # No section editing for new document!
        is_new = True
    # Record the revision and queue a re-render of the document.
    new_rev = doc.revise(request.user, data, section_id)
    doc.schedule_rendering('max-age=0')
    # Audit-log the write against the API key used for the request.
    request.authkey.log(is_new and 'created' or 'updated',
                        new_rev, data.get('summary', None))
    resp = HttpResponse()
    if not is_new:
        resp.content = 'RESET'
        resp.status_code = 205
    else:
        resp.content = 'CREATED'
        new_loc = request.build_absolute_uri(doc.get_absolute_url())
        resp['Location'] = new_loc
        resp.status_code = 201
    return resp