def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart input, merge data and files, and variable-decode the result.

    Form data and uploaded files are merged into a single QueryDict and run
    through ``variable_decode`` so dotted/bracketed keys become nested
    structures. Everything travels in the returned ``data``; the ``files``
    dict is deliberately left empty.

    Raises:
        ValidationError: if a key appears in both the form data and the files.
    """
    # BUG FIX: the original line read
    #   result = parsers.DataAndFiles, super().parse(...)
    # which binds a *tuple* (class, parse-result) to `result`, so every later
    # `result.data` access would raise AttributeError.
    result = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    _data_keys: Set[str] = set(result.data.keys())
    _file_keys: Set[str] = set(result.files.keys())
    _intersect = _file_keys.intersection(_data_keys)
    if _intersect:
        raise ValidationError(
            'files and data had intersection on keys: ' + str(_intersect))
    # merge everything together
    merged = QueryDict(mutable=True)
    merged.update(result.data)
    merged.update(result.files)  # type: ignore
    # decode it together
    decoded_merged = variable_decode(merged)
    parser_context['__JSON_AS_STRING__'] = True
    if len(result.files) > 0:
        # if we had at least one file, flag the request so downstream code
        # can tell files were present even though result.files is emptied
        parser_context['request'].META['REQUEST_HAD_FILES'] = True
    # Both branches of the original returned the identical value, so the
    # duplicated if/else return was collapsed into one.
    return parsers.DataAndFiles(decoded_merged, {})  # type: ignore
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, JSON-decoding any string field that looks like JSON.

    Non-string values and strings that fail to decode are passed through
    unchanged; uploaded files are forwarded untouched.

    Returns:
        parsers.DataAndFiles: QueryDict of decoded data plus the original files.
    """
    result = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    # Leftover debug prints and commented-out experiments were removed;
    # `type(value) != str` was replaced with the idiomatic isinstance check.
    data = {}
    for key, value in result.data.items():
        if not isinstance(value, str):
            data[key] = value
            continue
        # only attempt JSON decoding when the value could plausibly hold
        # a nested object or array
        if '{' in value or '[' in value:
            try:
                data[key] = json.loads(value)
            except ValueError:
                # looked like JSON but wasn't — keep the raw string
                data[key] = value
        else:
            data[key] = value
    qdict = QueryDict('', mutable=True)
    qdict.update(data)
    return parsers.DataAndFiles(qdict, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, JSON-decoding string fields with nested payloads.

    Each form field that is a string containing ``{`` or ``[`` is tried as
    JSON (useful for nested serializers); on decode failure the raw string is
    kept. Files pass through unchanged.
    """
    result = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    data = {}
    # parse each field with json (for the nested-serializer case);
    # dead commented-out code from the "parse a single data field" variant
    # was removed, and isinstance replaces the `type(...) != str` check
    for key, value in result.data.items():
        if not isinstance(value, str):
            data[key] = value
            continue
        if '{' in value or '[' in value:
            try:
                data[key] = json.loads(value)
            except ValueError:
                data[key] = value
        else:
            data[key] = value
    qdict = QueryDict('', mutable=True)
    qdict.update(data)
    return parsers.DataAndFiles(qdict, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, grouping dotted keys into nested dicts.

    A key like ``"profile.name"`` becomes ``data["profile"]["name"]``;
    keys without a dot are copied through unchanged.
    """
    result = super().parse(
        stream=stream, media_type=media_type, parser_context=parser_context)
    # Removed leftover debug print of result.data.
    data = {}
    for key, value in result.data.items():
        if '.' in key:
            # Split only on the FIRST dot: the original split on every dot
            # and used only the first two parts, silently dropping the tail
            # of keys like "a.b.c". Now "a.b.c" maps to data["a"]["b.c"].
            outer_key, inner_key = key.split('.', 1)
            data.setdefault(outer_key, {})[inner_key] = value
        else:
            data[key] = value
    return parsers.DataAndFiles(data, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart input whose payload is a JSON blob in the "data" field."""
    parsed = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    # The whole payload travels as one JSON-encoded form field named "data".
    payload = json.loads(parsed.data["data"])
    query_dict = QueryDict('', mutable=True)
    query_dict.update(payload)
    return parsers.DataAndFiles(query_dict, parsed.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart input and expand flat variable names into nested data."""
    parsed = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    # variable_decode turns dotted/indexed flat keys into nested structures.
    decoded = variable_decode(parsed.data)
    query_dict = QueryDict('', mutable=True)
    query_dict.update(decoded)
    return parsers.DataAndFiles(query_dict, parsed.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, JSON-decoding fields that look like arrays/objects.

    String values containing both ``[`` and ``]`` are tried as JSON; anything
    else (including non-string values) passes through unchanged.
    """
    result = super().parse(
        stream=stream, media_type=media_type, parser_context=parser_context)
    data = {}
    for key, value in result.data.items():
        # Guard on isinstance: the original did `"[" in value` unconditionally,
        # which raises TypeError for any non-string value.
        if isinstance(value, str) and "[" in value and "]" in value:  # nested
            try:
                data[key] = json.loads(value)
            except ValueError:
                # looked like JSON but wasn't valid — the original would have
                # raised; keep the raw string instead
                data[key] = value
        else:
            data[key] = value
    return parsers.DataAndFiles(data, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart input, funnelling every data and file key through analyze_key.

    ``analyze_key`` mutates ``data`` in place to build the (possibly nested)
    payload from each key/value pair; files are additionally forwarded
    unchanged in the result.
    """
    result = super().parse(
        stream=stream, media_type=media_type, parser_context=parser_context)
    data = {}
    for key, value in result.data.items():
        analyze_key(key, value, data)
    for key, value in result.files.items():
        analyze_key(key, value, data)
    # Removed leftover debug print of the assembled payload.
    return parsers.DataAndFiles(data, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, splitting comma-separated values for M2M fields.

    Looks up the view's serializer model (when available) so that a value
    like ``"1,2"`` or ``'"1","2"'`` posted to a many-to-many field is
    expanded into multiple QueryDict entries under the same key.
    """
    result = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    model = None
    qdict = QueryDict('', mutable=True)
    if parser_context and 'view' in parser_context:
        model = parser_context['view'].get_serializer_class().Meta.model
    for key, value in result.data.items():
        # Handle ManyToMany field data: parses lists of comma-separated
        # integers that might be quoted, e.g. "1,2".
        # Use a defaulted getattr — the original `getattr(model, key)` raised
        # AttributeError when the model was None (no view in the parser
        # context) or when the key was not a model attribute.
        descriptor = getattr(model, key, None) if model is not None else None
        if isinstance(descriptor,
                      fields.related_descriptors.ManyToManyDescriptor):
            for val in value.split(','):
                qdict.update({key: val.strip('"')})
        else:
            qdict.update({key: value})
    return parsers.DataAndFiles(qdict, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart input whose payload is a JSON blob in the "data" field.

    The files MultiValueDict is flattened to a plain dict. Without
    flattening, the serializer would receive something like
    <MultiValueDict: {'file': [<InMemoryUploadedFile: filename.jpg (image/jpeg)>]}>
    and fail to find the file, because the value is a list rather than a
    single File.
    """
    parsed = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    payload = json.loads(parsed.data["data"])
    # MultiValueDict.get returns the single (last) value for each key,
    # which is exactly the flattening the serializer needs.
    flat_files = {name: parsed.files.get(name) for name in parsed.files.keys()}
    return parsers.DataAndFiles(payload, flat_files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, JSON-decoding string fields that look like JSON.

    Strings containing ``{`` or ``[`` are tried as JSON; on failure the raw
    string is kept. Non-string values and files pass through unchanged.
    """
    result = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    data = {}
    for key, value in result.data.items():
        # isinstance replaces the `type(value) != str` anti-idiom
        if not isinstance(value, str):
            data[key] = value
            continue
        if '{' in value or '[' in value:
            try:
                data[key] = json.loads(value)
            except ValueError:
                # looked like JSON but wasn't — keep the raw string
                data[key] = value
        else:
            data[key] = value
    return parsers.DataAndFiles(data, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, expanding ``outer[inner]`` keys into nested dicts.

    A key such as ``"address[city]"`` becomes ``data["address"]["city"]``;
    keys without a bracket pair are copied through unchanged.
    """
    parsed = super().parse(
        stream=stream, media_type=media_type, parser_context=parser_context)
    output = {}
    for raw_key, value in parsed.data.items():
        if '[' not in raw_key or ']' not in raw_key:
            output[raw_key] = value
            continue
        # nested: slice out the part before '[' and the part inside [...]
        open_at = raw_key.index('[')
        close_at = raw_key.index(']')
        outer = raw_key[:open_at]
        inner = raw_key[open_at + 1:close_at]
        output.setdefault(outer, {})[inner] = value
    return parsers.DataAndFiles(output, parsed.files)
def parse(self, stream, media_type=None, parser_context=None):
    # Parses multipart data whose keys encode nesting as outer[index][field]
    # (split by self.get_nested_keys, defined elsewhere on this class),
    # building a list of dicts per outer key. Only three-part keys are
    # handled; two-part keys are an acknowledged TODO below.
    result = super().parse(stream=stream, media_type=media_type,
                           parser_context=parser_context)
    print(result.files)
    data = {}
    for key, value in result.data.items():
        if '[' in key and ']' in key:
            # print(key, ' and ', value)
            keys = self.get_nested_keys(key)
            if len(keys) == 3:
                nested_dict_key1 = keys[0]
                nested_dict_key2 = keys[1]
                nested_dict_key3 = keys[2]
                if nested_dict_key2.isdigit():
                    # array of data: the middle key is a list index
                    index = int(nested_dict_key2)
                    if nested_dict_key1 not in data:
                        # NOTE(review): this first branch appends without
                        # consulting `index`, so it assumes the first key seen
                        # for a new outer key has index 0 — confirm the field
                        # ordering guarantees this.
                        data[nested_dict_key1] = []
                        data[nested_dict_key1].append(
                            {nested_dict_key3: value})
                    elif index < len(data[nested_dict_key1]):
                        # index already exists: merge the field into that dict
                        data[nested_dict_key1][index].update(
                            {nested_dict_key3: value})
                    else:
                        # index beyond current length: start a new dict
                        data[nested_dict_key1].append(
                            {nested_dict_key3: value})
            # TODO len(keys) == 2:
        else:
            data[key] = value
    # print(data)
    """
    If pass only data, file field returned error as "no file was submitted"
    If QueryDict.dict() used, it works
    """
    # TODO investigate for this issue
    q_dict = QueryDict('', mutable=True)
    q_dict.update(data)
    # print('==================+++++==============*****+========================')
    # print(q_dict)
    return parsers.DataAndFiles(q_dict.dict(), result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart content manually, then rebuild a nested payload.

    The JSON body is taken from the ``"data"`` form field and each uploaded
    file is grafted into it at the position named by its dotted key via
    ``compile_payload``.

    Returns:
        The decoded payload (a dict) with files merged in — note this parser
        returns the payload directly, not a DataAndFiles wrapper.

    Raises:
        ParseError: when Django's multipart parser rejects the stream.
    """
    parser_context = parser_context or {}
    request = parser_context['request']
    encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
    meta = request.META.copy()
    meta['CONTENT_TYPE'] = media_type
    upload_handlers = request.upload_handlers
    try:
        parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
        form_data, files = parser.parse()
        # The original wrapped these in a DataAndFiles only to read them
        # straight back out, and dead-assigned `data = {}` before overwriting
        # it — both removed.
        data = json.loads(form_data["data"])
        # graft each file into the payload at its dotted-key position
        for key, value in files.items():
            data = compile_payload(data, key.split("."), value)
        return data
    except MultiPartParserError as exc:
        # str() replaces six.text_type — the file is Python-3-only
        # (it uses no-argument super() elsewhere), so six is a py2 leftover.
        raise ParseError('Multipart form parse error - %s' % str(exc))
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart data, JSON-decoding string fields that look like JSON.

    Strings containing ``{`` or ``[`` are tried as JSON; on failure the raw
    string is kept. Non-string values and files pass through unchanged.
    """
    result = super().parse(
        stream, media_type=media_type, parser_context=parser_context
    )
    # Removed a stray pasted pip-requirement comment (GDAL wheel path) that
    # had no relation to this code; isinstance replaces `type(...) != str`.
    data = {}
    for key, value in result.data.items():
        if not isinstance(value, str):
            data[key] = value
            continue
        if '{' in value or '[' in value:
            try:
                data[key] = json.loads(value)
            except ValueError:
                data[key] = value
        else:
            data[key] = value
    return parsers.DataAndFiles(data, result.files)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse multipart input and expand flat variable names into nested data."""
    parsed = super().parse(
        stream, media_type=media_type, parser_context=parser_context)
    # variable_decode turns dotted/indexed flat keys into nested structures;
    # files are forwarded untouched.
    return parsers.DataAndFiles(variable_decode(parsed.data), parsed.files)