def parse_control_data(control_data, continue_line=' ', split=': ', skip_after_blank=False):
    """Parse RFC822/Debian-control-style ``key: value`` text into a QueryDict.

    Lines beginning with *continue_line* (default one space) are folded into
    the previous value with a newline.  Repeated keys accumulate multiple
    values.  If *skip_after_blank* is true, everything after the first blank
    line is collected verbatim under the ``description`` key.
    """
    offset = len(continue_line)
    result_data = QueryDict('', mutable=True)
    key, value = None, None
    description = ''
    add_to_description = False
    for line in control_data.splitlines():
        # A whitespace-only line switches into description mode (if enabled).
        if not line.split() and skip_after_blank:
            add_to_description = True
        if add_to_description:
            # NOTE: the very first description line also gets a leading "\n".
            description += "\n"
            description += line
            continue
        if not line or line[0:offset] == continue_line:
            # Continuation line: fold into the current value.
            if key is not None:
                value += "\n"
                value += line[offset:]
        else:
            # New "key<split>value" line: flush the previous pair first.
            if key is not None:
                if key not in result_data:
                    result_data.setlist(key, [value])
                else:
                    result_data.appendlist(key, value)
            key, value = line.split(split, 1)
            value = value.lstrip()
    # Flush the final pending pair after the loop ends.
    if key is not None:
        if key not in result_data:
            result_data.setlist(key, [value])
        else:
            result_data.appendlist(key, value)
    if add_to_description:
        result_data.setlist('description', [description])
    return result_data
def urlparams(url_, hash=None, query_dict=None, **query):
    """
    Add a fragment and/or query parameters to a URL.

    New query params will be appended to exising parameters, except
    duplicate names, which will be replaced.

    NOTE: the ``hash`` parameter shadows the builtin; kept for
    backward-compatibility with existing keyword callers.
    """
    url_ = urllib.parse.urlparse(url_)
    fragment = hash if hash is not None else url_.fragment
    q = url_.query
    new_query_dict = QueryDict(
        smart_bytes(q), mutable=True) if q else QueryDict("", mutable=True)
    if query_dict:
        for k, l in query_dict.lists():
            new_query_dict[k] = None  # Replace, don't append.
            for v in l:
                new_query_dict.appendlist(k, v)
    for k, v in list(query.items()):
        new_query_dict[k] = v  # Replace, don't append.
    # Values set to None above act as tombstones and are dropped here.
    query_string = urlencode([(k, v) for k, l in new_query_dict.lists()
                              for v in l if v is not None])
    new = urllib.parse.ParseResult(url_.scheme, url_.netloc, url_.path,
                                   url_.params, query_string, fragment)
    return new.geturl()
def _get_initial_query_dict(query_dict):
    """Normalize *query_dict* (None, QueryDict, query string, mapping, or
    iterable of pairs) into a mutable QueryDict.

    Malformed inputs fall through silently and yield an empty QueryDict.
    """
    if not query_dict:
        return QueryDict(None, mutable=True)
    if isinstance(query_dict, QueryDict):
        _query_dict = query_dict.copy()
        return _query_dict
    if isinstance(query_dict, str):
        # Tolerate a full "?a=1&b=2" query string with leading "?".
        if query_dict.startswith("?"):
            query_dict = query_dict[1:]
        return QueryDict(query_dict, mutable=True)
    # Accept any old dict or list of pairs.
    try:
        pairs = list(query_dict.items())
    except Exception:  # noqa
        pairs = query_dict
    query_dict = QueryDict(None, mutable=True)
    # Enter each pair into QueryDict object:
    try:
        for key, val in pairs:
            # Convert values to unicode so that detecting
            # membership works for numbers.
            if isinstance(val, (list, tuple)):
                for e in val:
                    query_dict.appendlist(key, str(e))
            else:
                query_dict.appendlist(key, str(val))
    except Exception:  # noqa
        # Wrong data structure: deliberately best-effort, returns what we have.
        pass
    return query_dict
def urlparams(url_, fragment=None, query_dict=None, **query):
    """
    Add a fragment and/or query parameters to a URL.

    New query params will be appended to exising parameters, except
    duplicate names, which will be replaced.
    """
    url_ = urlparse.urlparse(url_)
    fragment = fragment if fragment is not None else url_.fragment
    q = url_.query
    new_query_dict = QueryDict(smart_str(q),
                               mutable=True) if q else QueryDict("", mutable=True)
    if query_dict:
        for k, l in query_dict.lists():
            new_query_dict[k] = None  # Replace, don't append.
            for v in l:
                new_query_dict.appendlist(k, v)
    for k, v in query.items():
        # Replace, don't append.
        if isinstance(v, list):
            new_query_dict.setlist(k, v)
        else:
            new_query_dict[k] = v
    # None values act as deletion markers and are filtered out here.
    query_string = urlencode([(k, v) for k, l in new_query_dict.lists()
                              for v in l if v is not None])
    new = urlparse.ParseResult(url_.scheme, url_.netloc, url_.path,
                               url_.params, query_string, fragment)
    return new.geturl()
def query_to_dict(query, existing_dict=None, prepared=False):
    """Takes custom query string and returns QueryDict"""
    if existing_dict is None:
        existing_dict = {}
    SINGLE_KEYS = {'paginate_by', 'sort_by'}
    # save_filters: keep non-single-key filters when the existing dict
    # already carries a single key, or when the caller says it's prepared.
    save_filters = (SINGLE_KEYS & set(existing_dict.keys())) or prepared
    if existing_dict:
        # NOTE(review): appendlist below assumes existing_dict is a
        # QueryDict (a plain dict copy has no appendlist) — confirm callers.
        d = existing_dict.copy()
    else:
        d = QueryDict(mutable=True)
    # FIELDS_MATCH is a module-level compiled regex splitting field pairs.
    pairs = FIELDS_MATCH.split(query)
    for name_value in pairs:
        nv = name_value.split(':', 1)
        if len(nv) == 2:
            if (save_filters and (nv[0] not in SINGLE_KEYS or nv[0] not in d))\
                    or (not save_filters and nv[0] in SINGLE_KEYS):
                # Values are percent-decoded and '_'-separated.
                values = unquote(nv[1])
                values = values.split('_')
                for v in values:
                    d.appendlist(nv[0], v)
    return d
def _get_initial_query_dict(self, qdict):
    """Normalize *qdict* (falsy, QueryDict, string, mapping, or iterable of
    pairs) into a mutable QueryDict.

    Malformed inputs are tolerated: the pair-loading step is best-effort
    and leaves the QueryDict empty on failure.
    """
    if not qdict:
        qdict = QueryDict(None, mutable=True)
    elif isinstance(qdict, QueryDict):
        qdict = qdict.copy()
    elif isinstance(qdict, basestring):
        # Tolerate a leading "?" on a raw query string.
        if qdict.startswith('?'):
            qdict = qdict[1:]
        qdict = QueryDict(qdict, mutable=True)
    else:
        # Accept any old dict or list of pairs.
        try:
            pairs = qdict.items()
        # Fix: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            pairs = qdict
        qdict = QueryDict(None, mutable=True)
        # Enter each pair into QueryDict object:
        try:
            for k, v in pairs:
                # Convert values to unicode so that detecting
                # membership works for numbers.
                if isinstance(v, (list, tuple)):
                    for e in v:
                        qdict.appendlist(k, unicode(e))
                else:
                    qdict.appendlist(k, unicode(v))
        except Exception:
            # Wrong data structure, qdict remains empty.
            pass
    return qdict
def get_refurl_context(self, name, value, attrs):
    """Build the template context for this widget, attaching the resolved
    reference URL and any configured filters as data attributes.
    """
    attrs['data-url'] = resolve_url(self.refurl)
    if self.filters:
        query = QueryDict('', mutable=True)
        for k, v in self.filters.items():
            # Sequence filter values are joined into one comma-separated value.
            if isinstance(v, (tuple, list)):
                v = ','.join(v)
            query.appendlist(k, v)
        # HACK: QueryDict has no public freeze API; flipping the private
        # flag makes the instance immutable in place.
        query._mutable = False
        attrs['data-filters'] = query.urlencode()
    if isinstance(value, (tuple, list)):
        # Drop falsy entries (empty strings, None) from multi-values.
        value = [x for x in value if x]
    context = {}
    context['widget'] = {
        'name': name,
        'is_hidden': self.is_hidden,
        'required': self.is_required,
        'value': self.format_value(value),
        'attrs': self.build_attrs(self.attrs, attrs),
        'template_name': self.template_name,
    }
    if self.allow_multiple_selected:
        context['widget']['attrs']['multiple'] = True
    if value:
        context['value'] = self.get_instance(value)
    return context
def _get_initial_query_dict(self, qdict):
    """Normalize *qdict* (falsy, QueryDict, string, mapping, or iterable of
    pairs) into a mutable QueryDict.

    Loading pairs is best-effort: on a wrong data structure the QueryDict
    is simply left empty.
    """
    if not qdict:
        qdict = QueryDict(None, mutable=True)
    elif isinstance(qdict, QueryDict):
        qdict = qdict.copy()
    elif isinstance(qdict, basestring):
        # Accept a raw query string, optionally with leading "?".
        if qdict.startswith('?'):
            qdict = qdict[1:]
        qdict = QueryDict(qdict, mutable=True)
    else:
        # Accept any old dict or list of pairs.
        try:
            pairs = qdict.items()
        # Fix: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            pairs = qdict
        qdict = QueryDict(None, mutable=True)
        # Enter each pair into QueryDict object:
        try:
            for k, v in pairs:
                # Convert values to unicode so that detecting
                # membership works for numbers.
                if isinstance(v, (list, tuple)):
                    for e in v:
                        qdict.appendlist(k, unicode(e))
                else:
                    qdict.appendlist(k, unicode(v))
        except Exception:
            # Wrong data structure, qdict remains empty.
            pass
    return qdict
def query_builder(self, d):
    """URL-encode *d*, which is either a mapping or an iterable of
    (key, value) pairs; pairs may carry duplicate keys.
    """
    result = QueryDict(mutable=True)
    if not isinstance(d, dict):
        # Iterable of pairs: preserve duplicates as multi-values.
        for key, val in d:
            result.appendlist(key, val)
    else:
        result.update(d)
    return result.urlencode()
def redirect_back(query):
    """Redirect to the original URI with *query* merged into its query string.

    NOTE(review): ``redirect_uri`` and ``state`` come from the enclosing
    scope (closure/outer function not visible here) — confirm at call site.
    """
    parts = urlsplit(redirect_uri)
    origin_query = QueryDict(parts.query, mutable=True)
    origin_query.update(query)
    if state is not None:
        # Preserve any existing 'state' values; append rather than replace.
        origin_query.appendlist('state', state)
    uri = urlunsplit(parts._replace(query=origin_query.urlencode()))
    return redirect(uri)
def form_entry_edit(request, form_entry_id, template='forms/form_entry_edit.html'):
    """Edit an existing FormEntry by replaying its field values into a fresh
    FormForForm, replacing the old entry on successful POST.
    """
    form_entry = get_object_or_404(FormEntry, id=form_entry_id)
    form_entry_fields = form_entry.fields.all()
    the_form = form_entry.form
    conversation_form_entry = form_entry.conversationformentry_set.all()[0]
    # enforce permissions, must have special right or be staff in current
    # conversation to continue
    if not request.user.has_perm('questionnaire.can_revise_questionnaire') \
            and not request.user == conversation_form_entry.conversation.getStaff().user:
        return redirect('/admin')
    if request.method == 'POST':
        args = (the_form, request.POST or None, request.FILES or None)
        form_for_form = FormForForm(*args)
        if form_for_form.is_valid():
            # save new FormEntry, assign to ConversationFormEntry
            entry = form_for_form.save()
            conversation_form_entry.entry = entry
            conversation_form_entry.save()
            # delete old FormEntry and FieldEntry
            form_entry.fields.all().delete()
            form_entry.conversationformentry_set.clear()
            form_entry.delete()
            # redirect to conversation-detail-page, store conversation id
            # before so it remains known
            return redirect('form_entry_edit', form_entry_id=entry.id)
    # convert FormEntry to QueryDict to initialize FormForForm
    # (django-forms-builder doesnt support editing instances); this is the
    # reverse of FormForForm.save() — there doesnt seem to be a general
    # cover-all-cases approach
    data = QueryDict('', mutable=True)
    for entry_field in form_entry_fields:
        # skip if question to this entry does not exist anymore
        try:
            the_field = Field.objects.get(pk=entry_field.field_id)
            widget = WIDGETS.get(the_field.field_type)
            if widget == DoubleDropWidget:
                # Split the stored compound value back into its sub-fields.
                for i, val in enumerate(DoubleDropWidget().decompress(entry_field.value)):
                    data['field_%s_%s' % (entry_field.field_id, i)] = val
            elif widget == CheckboxSelectMultiple:
                # Multi-select values were stored comma-joined.
                for val in entry_field.value.split(', '):
                    data.appendlist('field_%s' % entry_field.field_id, val)
            else:
                data['field_%s' % entry_field.field_id] = entry_field.value
        except Field.DoesNotExist:
            pass
    args = (the_form, data, request.FILES or None)
    form_for_form = FormForForm(*args)
    return render_to_response(template, {
        'form': the_form,
        "form_for_form": form_for_form,
    }, RequestContext(request))
def build_query(query):
    """URL-encode *query* (mapping or iterable of pairs), coercing every
    value to text; duplicate keys are preserved as multi-values.
    """
    encoded = QueryDict('', mutable=True)
    pairs = query.items() if isinstance(query, dict) else query
    for name, val in pairs:
        encoded.appendlist(name, force_str(val))
    return encoded.urlencode()
def build_query(query):
    """URL-encode *query* (mapping or iterable of pairs), coercing every
    value to text; duplicate keys are preserved as multi-values.
    """
    encoded = QueryDict('', mutable=True)
    pairs = query.items() if isinstance(query, dict) else query
    for name, val in pairs:
        encoded.appendlist(name, force_text(val))
    return encoded.urlencode()
def dict_to_querydict(dict_):
    """
    Converts a dict value into the Django's QueryDict object.

    List values become multi-value entries via setlist(); scalar values
    are appended as single values.  The result is returned immutable.
    """
    query_dict = QueryDict('', mutable=True)
    for name, value in dict_.items():
        # Fix: the list check must apply to the *value* — dict keys are
        # hashable and can never be lists, so the old
        # `isinstance(name, list)` branch was unreachable.
        if isinstance(value, list):
            query_dict.setlist(name, value)
        else:
            query_dict.appendlist(name, value)
    # HACK: QueryDict exposes no public freeze API; flip the private flag
    # to make the returned instance immutable (mirrors original behavior).
    query_dict._mutable = False
    return query_dict
def extract_data(arguments):
    """Fold (key, value) pairs into a nested kwargs dict.

    Keys ending in '[]' keep all their values as a list; other keys keep
    the last value only.  Path expansion is delegated to
    _set_value_on_path.
    """
    collected = QueryDict(mutable=True)
    for name, val in arguments:
        collected.appendlist(name, val)
    kwargs = {}
    for name in set(collected):
        if name.endswith('[]'):
            extracted = collected.getlist(name)
        else:
            extracted = collected.get(name)
        _set_value_on_path(kwargs, name, extracted)
    return kwargs
def render(self, context):
    """Template-tag node: apply the parsed (attr, op, value) triples to the
    current request's GET parameters and return the encoded query string.

    Ops: "=" replaces, "+=" appends, "-=" removes matching values.
    A None/empty value with "="/"+=" deletes the parameter entirely.
    """
    try:
        GET = context.get('request').GET.copy()
    except AttributeError:
        # No request in context (e.g. tests): start from an empty QueryDict.
        GET = QueryDict("", mutable=True)
    for attr, op, val in self.triples:
        actual_attr = attr.resolve(context)
        try:
            actual_val = val.resolve(context)
        except:
            # Unresolvable variable: fall back to its literal name,
            # treating the literal "None" as a real None.
            if val.var == "None":
                actual_val = None
            else:
                actual_val = val.var
        if actual_attr:
            if op == "=":
                if actual_val is None or actual_val == []:
                    if GET.has_key(actual_attr):
                        del GET[actual_attr]
                elif hasattr(actual_val, '__iter__'):
                    GET.setlist(actual_attr, actual_val)
                else:
                    GET[actual_attr] = unicode(actual_val)
            elif op == "+=":
                if actual_val is None or actual_val == []:
                    if GET.has_key(actual_attr):
                        del GET[actual_attr]
                elif hasattr(actual_val, '__iter__'):
                    GET.setlist(
                        actual_attr,
                        GET.getlist(actual_attr) + list(actual_val))
                else:
                    GET.appendlist(actual_attr, unicode(actual_val))
            elif op == "-=":
                li = GET.getlist(actual_attr)
                if hasattr(actual_val, '__iter__'):
                    for v in list(actual_val):
                        if v in li:
                            li.remove(v)
                    GET.setlist(actual_attr, li)
                else:
                    actual_val = unicode(actual_val)
                    if actual_val in li:
                        li.remove(actual_val)
                    GET.setlist(actual_attr, li)
    return fix_ampersands(GET.urlencode())
def search_url(context, facet='', term='', remove=False, absolute=True):
    """
    Generate search url starting from action-list defined url.
    If absolute is True (as default) this tag remove all other facets,
    to returns only the provided one (facet:term).
    Else replace or append provided facet:term.
    If remove is True this tag remove only provided facet:term.
    """
    # Choose the base search view from the context, defaulting to projects.
    if 'search_using' in context:
        if context['search_using'].startswith('initiative'):
            url = reverse('projects:initiative-search')
        else:
            url = reverse('projects:project-search')
    else:
        # back-compatibility
        url = reverse('projects:project-search')
    if not facet:
        return url
    if not term:
        # Facet alone is treated as a free-text query "q".
        if absolute:
            return u"{0}?q={1}".format(url, facet)
        else:
            query = context['request'].GET.copy()
            query['q'] = facet
            return u"{0}?{1}".format(url, query.urlencode(safe=':/'))
    value = u"{0}:{1}".format(facet, term)
    if absolute:
        # Fresh query string containing only this facet selection.
        query = QueryDict('', mutable=True)
        query.update({'selected_facets': value})
        return u"{0}?{1}".format(url, query.urlencode(safe=':/'))
    query = context['request'].GET.copy()
    if remove:
        if value in query.getlist('selected_facets'):
            query.setlist(
                'selected_facets',
                filter(lambda x: x != value,
                       query.getlist('selected_facets')))
    else:
        # Append only if not already selected (no duplicates).
        if value not in query.getlist('selected_facets'):
            query.appendlist('selected_facets', value)
    # remove page from query to avoid empty pages
    if 'page' in query:
        del query['page']
    return u"{0}?{1}".format(url, query.urlencode(safe=':/'))
def get_url_from_cleaned_data(region, data):
    """Build a sorted '?key=value&...' query string from cleaned form data,
    skipping 'deal_type' and empty values.  Returns '' when nothing remains.
    """
    query = QueryDict(mutable=True)
    for key, values in data.items():
        if key in ['deal_type']:
            continue
        # Normalize scalars to a one-element list.
        if not hasattr(values, '__iter__'):
            values = [values]
        for value in values:
            if value:
                # getattr handles Region-like objects: use their .id when present.
                query.appendlist(key, unicode(getattr(value, 'id', value)))
    return u'?%s' % get_sorted_urlencode(query.lists()) if len(
        query) else ''
def render(self, context):
    """Template-tag node: apply (attr, op, value) triples to a copy of the
    request's GET parameters and return the encoded result.

    "=" replaces the value(s), "+=" appends, "-=" removes matches; a
    None/empty value with "="/"+=" deletes the parameter.
    """
    try:
        GET = context.get('request').GET.copy()
    except AttributeError:
        # No request available: operate on an empty QueryDict.
        GET = QueryDict("", mutable=True)
    for attr, op, val in self.triples:
        actual_attr = attr.resolve(context)
        try:
            actual_val = val.resolve(context)
        except:
            # Unresolvable variable: use its literal text, mapping the
            # literal "None" to a real None.
            if val.var == "None":
                actual_val = None
            else:
                actual_val = val.var
        if actual_attr:
            if op == "=":
                if actual_val is None or actual_val == []:
                    if GET.has_key(actual_attr):
                        del GET[actual_attr]
                elif hasattr(actual_val, '__iter__'):
                    GET.setlist(actual_attr, actual_val)
                else:
                    GET[actual_attr] = unicode(actual_val)
            elif op == "+=":
                if actual_val is None or actual_val == []:
                    if GET.has_key(actual_attr):
                        del GET[actual_attr]
                elif hasattr(actual_val, '__iter__'):
                    GET.setlist(actual_attr,
                                GET.getlist(actual_attr) + list(actual_val))
                else:
                    GET.appendlist(actual_attr, unicode(actual_val))
            elif op == "-=":
                li = GET.getlist(actual_attr)
                if hasattr(actual_val, '__iter__'):
                    for v in list(actual_val):
                        if v in li:
                            li.remove(v)
                    GET.setlist(actual_attr, li)
                else:
                    actual_val = unicode(actual_val)
                    if actual_val in li:
                        li.remove(actual_val)
                    GET.setlist(actual_attr, li)
    return fix_ampersands(GET.urlencode())
def recreateRequest(self, request):
    """Rebuild a replayable request from this object's stored arguments,
    optionally carrying over the 'format' parameter of *request*.
    """
    reqargs = RequestArgument.objects.filter(request=self)
    onedict = {}
    multidict = QueryDict('', mutable=True)
    for arg in reqargs:
        onedict[arg.name] = arg.value
        multidict.appendlist(arg.name, arg.value)
    if ('format' in getDataFromRequest(request)):
        argname = unicode('format')
        argvalue = getDataFromRequest(request).get('format')
        onedict[argname] = argvalue
        multidict.appendlist(argname, argvalue)
    # NOTE(review): redata is built but never used — the replay below is
    # constructed from onedict only.  Possibly a leftover; confirm intent.
    redata = multidict.copy()
    redata.update(onedict)
    return HttpRequestReplay(request, self.path, onedict)
def test_immutability(self):
    """Every mutating operation on an immutable QueryDict must raise."""
    q = QueryDict()
    mutators = [
        ('__setitem__', ('something', 'bar')),
        ('setlist', ('foo', ['bar'])),
        ('appendlist', ('foo', ['bar'])),
        ('update', ({'foo': 'bar'},)),
        ('pop', ('foo',)),
        ('popitem', ()),
        ('clear', ()),
    ]
    for method, args in mutators:
        with self.assertRaises(AttributeError):
            getattr(q, method)(*args)
def test_immutability(self):
    """An immutable QueryDict rejects every mutating call."""
    q = QueryDict()
    for method, args in (
        ("__setitem__", ("something", "bar")),
        ("setlist", ("foo", ["bar"])),
        ("appendlist", ("foo", ["bar"])),
        ("update", ({"foo": "bar"},)),
        ("pop", ("foo",)),
        ("popitem", ()),
        ("clear", ()),
    ):
        with self.assertRaises(AttributeError):
            getattr(q, method)(*args)
def test_single_key_value(self):
    """Test QueryDict with one key/value pair"""
    q = QueryDict(str('foo=bar'))
    # Read access: present key, missing key, defaults, list access.
    self.assertEqual(q['foo'], 'bar')
    with self.assertRaises(KeyError):
        q.__getitem__('bar')
    with self.assertRaises(AttributeError):
        q.__setitem__('something', 'bar')
    self.assertEqual(q.get('foo', 'default'), 'bar')
    self.assertEqual(q.get('bar', 'default'), 'default')
    self.assertEqual(q.getlist('foo'), ['bar'])
    self.assertEqual(q.getlist('bar'), [])
    with self.assertRaises(AttributeError):
        q.setlist('foo', ['bar'])
    with self.assertRaises(AttributeError):
        q.appendlist('foo', ['bar'])
    # has_key() exists only on Python 2.
    if six.PY2:
        self.assertTrue(q.has_key('foo'))
    self.assertIn('foo', q)
    if six.PY2:
        self.assertFalse(q.has_key('bar'))
    self.assertNotIn('bar', q)
    # Iteration views via six for py2/py3 compatibility.
    self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
    self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
    self.assertEqual(list(six.iterkeys(q)), ['foo'])
    self.assertEqual(list(six.itervalues(q)), ['bar'])
    self.assertEqual(len(q), 1)
    # All remaining mutators must raise on the immutable instance.
    with self.assertRaises(AttributeError):
        q.update({'foo': 'bar'})
    with self.assertRaises(AttributeError):
        q.pop('foo')
    with self.assertRaises(AttributeError):
        q.popitem()
    with self.assertRaises(AttributeError):
        q.clear()
    with self.assertRaises(AttributeError):
        q.setdefault('foo', 'bar')
    self.assertEqual(q.urlencode(), 'foo=bar')
def test_single_key_value(self):
    """Test QueryDict with one key/value pair"""
    q = QueryDict(str("foo=bar"))
    # Read access: present key, missing key, defaults, list access.
    self.assertEqual(q["foo"], "bar")
    with self.assertRaises(KeyError):
        q.__getitem__("bar")
    with self.assertRaises(AttributeError):
        q.__setitem__("something", "bar")
    self.assertEqual(q.get("foo", "default"), "bar")
    self.assertEqual(q.get("bar", "default"), "default")
    self.assertEqual(q.getlist("foo"), ["bar"])
    self.assertEqual(q.getlist("bar"), [])
    with self.assertRaises(AttributeError):
        q.setlist("foo", ["bar"])
    with self.assertRaises(AttributeError):
        q.appendlist("foo", ["bar"])
    # has_key() exists only on Python 2.
    if six.PY2:
        self.assertTrue(q.has_key("foo"))
    self.assertIn("foo", q)
    if six.PY2:
        self.assertFalse(q.has_key("bar"))
    self.assertNotIn("bar", q)
    # Iteration views via six for py2/py3 compatibility.
    self.assertEqual(list(six.iteritems(q)), [("foo", "bar")])
    self.assertEqual(list(six.iterlists(q)), [("foo", ["bar"])])
    self.assertEqual(list(six.iterkeys(q)), ["foo"])
    self.assertEqual(list(six.itervalues(q)), ["bar"])
    self.assertEqual(len(q), 1)
    # All remaining mutators must raise on the immutable instance.
    with self.assertRaises(AttributeError):
        q.update({"foo": "bar"})
    with self.assertRaises(AttributeError):
        q.pop("foo")
    with self.assertRaises(AttributeError):
        q.popitem()
    with self.assertRaises(AttributeError):
        q.clear()
    with self.assertRaises(AttributeError):
        q.setdefault("foo", "bar")
    self.assertEqual(q.urlencode(), "foo=bar")
def test_multiple_keys(self):
    """Test QueryDict with two key/value pairs with same keys."""
    q = QueryDict(str('vote=yes&vote=no'))
    # __getitem__ returns the *last* value; getlist returns all of them.
    self.assertEqual(q['vote'], 'no')
    with self.assertRaises(AttributeError):
        q.__setitem__('something', 'bar')
    self.assertEqual(q.get('vote', 'default'), 'no')
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.getlist('vote'), ['yes', 'no'])
    self.assertEqual(q.getlist('foo'), [])
    with self.assertRaises(AttributeError):
        q.setlist('foo', ['bar', 'baz'])
    with self.assertRaises(AttributeError):
        q.setlist('foo', ['bar', 'baz'])
    with self.assertRaises(AttributeError):
        q.appendlist('foo', ['bar'])
    # has_key() exists only on Python 2.
    if six.PY2:
        self.assertIs(q.has_key('vote'), True)
    self.assertIn('vote', q)
    if six.PY2:
        self.assertIs(q.has_key('foo'), False)
    self.assertNotIn('foo', q)
    # Duplicate keys collapse to one entry, so len() is 1.
    self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
    self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
    self.assertEqual(list(six.iterkeys(q)), ['vote'])
    self.assertEqual(list(six.itervalues(q)), ['no'])
    self.assertEqual(len(q), 1)
    # All mutators must raise on the immutable instance.
    with self.assertRaises(AttributeError):
        q.update({'foo': 'bar'})
    with self.assertRaises(AttributeError):
        q.pop('foo')
    with self.assertRaises(AttributeError):
        q.popitem()
    with self.assertRaises(AttributeError):
        q.clear()
    with self.assertRaises(AttributeError):
        q.setdefault('foo', 'bar')
    with self.assertRaises(AttributeError):
        q.__delitem__('vote')
def test_multiple_keys(self):
    """Test QueryDict with two key/value pairs with same keys."""
    q = QueryDict(str("vote=yes&vote=no"))
    # __getitem__ returns the *last* value; getlist returns all of them.
    self.assertEqual(q["vote"], "no")
    with self.assertRaises(AttributeError):
        q.__setitem__("something", "bar")
    self.assertEqual(q.get("vote", "default"), "no")
    self.assertEqual(q.get("foo", "default"), "default")
    self.assertEqual(q.getlist("vote"), ["yes", "no"])
    self.assertEqual(q.getlist("foo"), [])
    with self.assertRaises(AttributeError):
        q.setlist("foo", ["bar", "baz"])
    with self.assertRaises(AttributeError):
        q.setlist("foo", ["bar", "baz"])
    with self.assertRaises(AttributeError):
        q.appendlist("foo", ["bar"])
    # has_key() exists only on Python 2.
    if six.PY2:
        self.assertEqual(q.has_key("vote"), True)
    self.assertEqual("vote" in q, True)
    if six.PY2:
        self.assertEqual(q.has_key("foo"), False)
    self.assertEqual("foo" in q, False)
    # Duplicate keys collapse to one entry, so len() is 1.
    self.assertEqual(list(six.iteritems(q)), [("vote", "no")])
    self.assertEqual(list(six.iterlists(q)), [("vote", ["yes", "no"])])
    self.assertEqual(list(six.iterkeys(q)), ["vote"])
    self.assertEqual(list(six.itervalues(q)), ["no"])
    self.assertEqual(len(q), 1)
    # All mutators must raise on the immutable instance.
    with self.assertRaises(AttributeError):
        q.update({"foo": "bar"})
    with self.assertRaises(AttributeError):
        q.pop("foo")
    with self.assertRaises(AttributeError):
        q.popitem()
    with self.assertRaises(AttributeError):
        q.clear()
    with self.assertRaises(AttributeError):
        q.setdefault("foo", "bar")
    with self.assertRaises(AttributeError):
        q.__delitem__("vote")
def test_multiple_keys(self):
    """Test QueryDict with two key/value pairs with same keys."""
    q = QueryDict(str('vote=yes&vote=no'))
    # __getitem__ returns the *last* value; getlist returns all of them.
    self.assertEqual(q['vote'], 'no')
    with self.assertRaises(AttributeError):
        q.__setitem__('something', 'bar')
    self.assertEqual(q.get('vote', 'default'), 'no')
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.getlist('vote'), ['yes', 'no'])
    self.assertEqual(q.getlist('foo'), [])
    with self.assertRaises(AttributeError):
        q.setlist('foo', ['bar', 'baz'])
    with self.assertRaises(AttributeError):
        q.setlist('foo', ['bar', 'baz'])
    with self.assertRaises(AttributeError):
        q.appendlist('foo', ['bar'])
    # has_key() exists only on Python 2.
    if six.PY2:
        self.assertEqual(q.has_key('vote'), True)
    self.assertEqual('vote' in q, True)
    if six.PY2:
        self.assertEqual(q.has_key('foo'), False)
    self.assertEqual('foo' in q, False)
    # Duplicate keys collapse to one entry, so len() is 1.
    self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
    self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
    self.assertEqual(list(six.iterkeys(q)), ['vote'])
    self.assertEqual(list(six.itervalues(q)), ['no'])
    self.assertEqual(len(q), 1)
    # All mutators must raise on the immutable instance.
    with self.assertRaises(AttributeError):
        q.update({'foo': 'bar'})
    with self.assertRaises(AttributeError):
        q.pop('foo')
    with self.assertRaises(AttributeError):
        q.popitem()
    with self.assertRaises(AttributeError):
        q.clear()
    with self.assertRaises(AttributeError):
        q.setdefault('foo', 'bar')
    with self.assertRaises(AttributeError):
        q.__delitem__('vote')
def modify_urlquery(in_url, **kwargs):
    """Modify the query part of a URL.

    Supplying delete=True will remove all matching parameters matching
    the value; otherwise each value is appended unless already present.
    safe=... is forwarded to QueryDict.urlencode().
    """
    safe = kwargs.pop('safe', None)
    delete = kwargs.pop('delete', False)
    parts = urlsplit(in_url)
    params = QueryDict(parts.query, mutable=True)
    for key, value in kwargs.items():
        current = params.getlist(key)
        if delete:
            params.setlist(key, [item for item in current if item != value])
        elif value not in current:
            params.appendlist(key, value)
    return urlunsplit((parts.scheme, parts.netloc, parts.path,
                       params.urlencode(safe=safe), parts.fragment))
def modify_urlquery(in_url, **kwargs):
    """Modify the query part of a URL.

    With delete=True, every parameter value equal to the supplied one is
    dropped; otherwise values are appended when not already present.
    safe=... is forwarded to QueryDict.urlencode().
    """
    safe = kwargs.pop('safe', None)
    delete = kwargs.pop('delete', False)
    scheme, netloc, path, query, fragment = urlsplit(in_url)
    q_dict = QueryDict(query, mutable=True)
    for name, wanted in kwargs.items():
        existing = q_dict.getlist(name)
        if delete:
            q_dict.setlist(name, [v for v in existing if v != wanted])
        elif wanted not in existing:
            q_dict.appendlist(name, wanted)
    new_query = q_dict.urlencode(safe=safe)
    return urlunsplit((scheme, netloc, path, new_query, fragment))
def test_basic_mutable_operations(self):
    """Exercise the mutating API on a mutable QueryDict copy.

    NOTE: the assertion order matters — each step mutates state the
    following assertions depend on.
    """
    q = QueryDict(str('')).copy()  # .copy() of an immutable dict is mutable
    q['name'] = 'john'
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.get('name', 'default'), 'john')
    self.assertEqual(q.getlist('name'), ['john'])
    self.assertEqual(q.getlist('foo'), [])
    q.setlist('foo', ['bar', 'baz'])
    # __getitem__/get return the *last* value of a multi-value key.
    self.assertEqual(q.get('foo', 'default'), 'baz')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
    q.appendlist('foo', 'another')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
    self.assertEqual(q['foo'], 'another')
    if six.PY2:
        self.assertTrue(q.has_key('foo'))
    self.assertTrue('foo' in q)
    self.assertListEqual(sorted(list(six.iteritems(q))),
                         [('foo', 'another'), ('name', 'john')])
    self.assertListEqual(sorted(list(six.iterlists(q))),
                         [('foo', ['bar', 'baz', 'another']),
                          ('name', ['john'])])
    self.assertListEqual(sorted(list(six.iterkeys(q))), ['foo', 'name'])
    self.assertListEqual(sorted(list(six.itervalues(q))),
                         ['another', 'john'])
    # update() *appends* to the existing list rather than replacing it.
    q.update({'foo': 'hello'})
    self.assertEqual(q['foo'], 'hello')
    self.assertEqual(q.get('foo', 'not available'), 'hello')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
    # pop() returns the whole value list.
    self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
    self.assertEqual(q.pop('foo', 'not there'), 'not there')
    self.assertEqual(q.get('foo', 'not there'), 'not there')
    self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
    self.assertEqual(q['foo'], 'bar')
    self.assertEqual(q.getlist('foo'), ['bar'])
    # Key ordering in urlencode() is unspecified; accept both.
    self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
    q.clear()
    self.assertEqual(len(q), 0)
def test_basic_mutable_operations(self):
    """Exercise the mutating API on a mutable QueryDict copy.

    NOTE: assertion order matters — each mutation feeds the next checks.
    """
    q = QueryDict(str('')).copy()  # .copy() of an immutable dict is mutable
    q['name'] = 'john'
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.get('name', 'default'), 'john')
    self.assertEqual(q.getlist('name'), ['john'])
    self.assertEqual(q.getlist('foo'), [])
    q.setlist('foo', ['bar', 'baz'])
    # __getitem__/get return the *last* value of a multi-value key.
    self.assertEqual(q.get('foo', 'default'), 'baz')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
    q.appendlist('foo', 'another')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
    self.assertEqual(q['foo'], 'another')
    if not six.PY3:
        self.assertTrue(q.has_key('foo'))
    self.assertTrue('foo' in q)
    self.assertEqual(sorted(list(six.iteritems(q))),
                     [('foo', 'another'), ('name', 'john')])
    self.assertEqual(sorted(list(six.iterlists(q))),
                     [('foo', ['bar', 'baz', 'another']),
                      ('name', ['john'])])
    self.assertEqual(sorted(list(six.iterkeys(q))), ['foo', 'name'])
    self.assertEqual(sorted(list(six.itervalues(q))), ['another', 'john'])
    self.assertEqual(len(q), 2)
    # update() *appends* to the existing list rather than replacing it.
    q.update({'foo': 'hello'})
    self.assertEqual(q['foo'], 'hello')
    self.assertEqual(q.get('foo', 'not available'), 'hello')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
    # pop() returns the whole value list.
    self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
    self.assertEqual(q.pop('foo', 'not there'), 'not there')
    self.assertEqual(q.get('foo', 'not there'), 'not there')
    self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
    self.assertEqual(q['foo'], 'bar')
    self.assertEqual(q.getlist('foo'), ['bar'])
    # Key ordering in urlencode() is unspecified; accept both.
    self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
    q.clear()
    self.assertEqual(len(q), 0)
def test_multiple_keys(self):
    """Test QueryDict with two key/value pairs with same keys."""
    q = QueryDict('vote=yes&vote=no')
    # __getitem__/get return the last value; getlist returns all of them.
    self.assertEqual(q['vote'], 'no')
    self.assertEqual(q.get('vote', 'default'), 'no')
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.getlist('vote'), ['yes', 'no'])
    self.assertEqual(q.getlist('foo'), [])
    # Membership and the iteration views; duplicate keys collapse to one
    # entry, so len() is 1.
    self.assertIn('vote', q)
    self.assertNotIn('foo', q)
    self.assertEqual(list(q), ['vote'])
    self.assertEqual(list(q.items()), [('vote', 'no')])
    self.assertEqual(list(q.lists()), [('vote', ['yes', 'no'])])
    self.assertEqual(list(q.keys()), ['vote'])
    self.assertEqual(list(q.values()), ['no'])
    self.assertEqual(len(q), 1)
    # Every mutating operation must raise on the immutable instance.
    for method, args in [
        ('__setitem__', ('something', 'bar')),
        ('setlist', ('foo', ['bar', 'baz'])),
        ('setlist', ('foo', ['bar', 'baz'])),
        ('appendlist', ('foo', ['bar'])),
        ('update', ({'foo': 'bar'},)),
        ('pop', ('foo',)),
        ('popitem', ()),
        ('clear', ()),
        ('setdefault', ('foo', 'bar')),
        ('__delitem__', ('vote',)),
    ]:
        with self.assertRaises(AttributeError):
            getattr(q, method)(*args)
def test_multiple_keys(self):
    """Test QueryDict with two key/value pairs with same keys."""
    q = QueryDict("vote=yes&vote=no")
    # Scalar access yields the last value; getlist yields them all.
    self.assertEqual(q["vote"], "no")
    self.assertEqual(q.get("vote", "default"), "no")
    self.assertEqual(q.get("foo", "default"), "default")
    self.assertEqual(q.getlist("vote"), ["yes", "no"])
    self.assertEqual(q.getlist("foo"), [])
    # Membership and iteration views; the duplicated key is one entry.
    self.assertIn("vote", q)
    self.assertNotIn("foo", q)
    self.assertEqual(list(q), ["vote"])
    self.assertEqual(list(q.items()), [("vote", "no")])
    self.assertEqual(list(q.lists()), [("vote", ["yes", "no"])])
    self.assertEqual(list(q.keys()), ["vote"])
    self.assertEqual(list(q.values()), ["no"])
    self.assertEqual(len(q), 1)
    # Mutating calls must all fail on the immutable instance.
    for method, args in (
        ("__setitem__", ("something", "bar")),
        ("setlist", ("foo", ["bar", "baz"])),
        ("setlist", ("foo", ["bar", "baz"])),
        ("appendlist", ("foo", ["bar"])),
        ("update", ({"foo": "bar"},)),
        ("pop", ("foo",)),
        ("popitem", ()),
        ("clear", ()),
        ("setdefault", ("foo", "bar")),
        ("__delitem__", ("vote",)),
    ):
        with self.assertRaises(AttributeError):
            getattr(q, method)(*args)
def test_single_key_value(self):
    """Test QueryDict with one key/value pair"""
    q = QueryDict("foo=bar")
    # Read access: present key, missing key, defaults, list access.
    self.assertEqual(q["foo"], "bar")
    with self.assertRaises(KeyError):
        q.__getitem__("bar")
    self.assertEqual(q.get("foo", "default"), "bar")
    self.assertEqual(q.get("bar", "default"), "default")
    self.assertEqual(q.getlist("foo"), ["bar"])
    self.assertEqual(q.getlist("bar"), [])
    # Membership, iteration views, and encoding.
    self.assertIn("foo", q)
    self.assertNotIn("bar", q)
    self.assertEqual(list(q), ["foo"])
    self.assertEqual(list(q.items()), [("foo", "bar")])
    self.assertEqual(list(q.lists()), [("foo", ["bar"])])
    self.assertEqual(list(q.keys()), ["foo"])
    self.assertEqual(list(q.values()), ["bar"])
    self.assertEqual(len(q), 1)
    self.assertEqual(q.urlencode(), "foo=bar")
    # Every mutating operation must raise on the immutable instance.
    for method, args in (
        ("__setitem__", ("something", "bar")),
        ("setlist", ("foo", ["bar"])),
        ("appendlist", ("foo", ["bar"])),
        ("update", ({"foo": "bar"},)),
        ("pop", ("foo",)),
        ("popitem", ()),
        ("clear", ()),
        ("setdefault", ("foo", "bar")),
    ):
        with self.assertRaises(AttributeError):
            getattr(q, method)(*args)
def test_basic_mutable_operations(self):
    """Exercise the mutating API on a mutable QueryDict copy (Python 2 era:
    uses has_key/failUnless and list-returning items()/keys()/values()).

    NOTE: assertion order matters — each mutation feeds later checks.
    """
    q = QueryDict('').copy()  # .copy() of an immutable dict is mutable
    q['name'] = 'john'
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.get('name', 'default'), 'john')
    self.assertEqual(q.getlist('name'), ['john'])
    self.assertEqual(q.getlist('foo'), [])
    q.setlist('foo', ['bar', 'baz'])
    # Scalar access returns the *last* value of a multi-value key.
    self.assertEqual(q.get('foo', 'default'), 'baz')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
    q.appendlist('foo', 'another')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
    self.assertEqual(q['foo'], 'another')
    self.failUnless(q.has_key('foo'))
    self.failUnless('foo' in q)
    self.assertEqual(q.items(), [(u'foo', u'another'), (u'name', u'john')])
    self.assertEqual(q.lists(), [(u'foo', [u'bar', u'baz', u'another']),
                                 (u'name', [u'john'])])
    self.assertEqual(q.keys(), [u'foo', u'name'])
    self.assertEqual(q.values(), [u'another', u'john'])
    self.assertEqual(len(q), 2)
    # update() *appends* to the existing list rather than replacing it.
    q.update({'foo': 'hello'})
    self.assertEqual(q['foo'], 'hello')
    self.assertEqual(q.get('foo', 'not available'), 'hello')
    self.assertEqual(q.getlist('foo'),
                     [u'bar', u'baz', u'another', u'hello'])
    # pop() returns the whole value list.
    self.assertEqual(q.pop('foo'), [u'bar', u'baz', u'another', u'hello'])
    self.assertEqual(q.pop('foo', 'not there'), 'not there')
    self.assertEqual(q.get('foo', 'not there'), 'not there')
    self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
    self.assertEqual(q['foo'], 'bar')
    self.assertEqual(q.getlist('foo'), ['bar'])
    self.assertEqual(q.urlencode(), 'foo=bar&name=john')
    q.clear()
    self.assertEqual(len(q), 0)
def test_basic_mutable_operations(self):
    """Exercise the full mutating API of a QueryDict built with mutable=True."""
    q = QueryDict(mutable=True)
    q["name"] = "john"
    # Reads before 'foo' exists.
    self.assertEqual(q.get("foo", "default"), "default")
    self.assertEqual(q.get("name", "default"), "john")
    self.assertEqual(q.getlist("name"), ["john"])
    self.assertEqual(q.getlist("foo"), [])
    # setlist() replaces the whole value list; scalar lookup returns the last value.
    q.setlist("foo", ["bar", "baz"])
    self.assertEqual(q.get("foo", "default"), "baz")
    self.assertEqual(q.getlist("foo"), ["bar", "baz"])
    # appendlist() adds a single value to the existing list.
    q.appendlist("foo", "another")
    self.assertEqual(q.getlist("foo"), ["bar", "baz", "another"])
    self.assertEqual(q["foo"], "another")
    # Membership and the iteration views (order-insensitive comparisons).
    self.assertIn("foo", q)
    self.assertCountEqual(q, ["foo", "name"])
    self.assertCountEqual(q.items(), [("foo", "another"), ("name", "john")])
    self.assertCountEqual(q.lists(), [("foo", ["bar", "baz", "another"]), ("name", ["john"])])
    self.assertCountEqual(q.keys(), ["foo", "name"])
    self.assertCountEqual(q.values(), ["another", "john"])
    # update() appends to the value list rather than replacing it.
    q.update({"foo": "hello"})
    self.assertEqual(q["foo"], "hello")
    self.assertEqual(q.get("foo", "not available"), "hello")
    self.assertEqual(q.getlist("foo"), ["bar", "baz", "another", "hello"])
    # pop() returns the full value list; a second pop falls back to the default.
    self.assertEqual(q.pop("foo"), ["bar", "baz", "another", "hello"])
    self.assertEqual(q.pop("foo", "not there"), "not there")
    self.assertEqual(q.get("foo", "not there"), "not there")
    self.assertEqual(q.setdefault("foo", "bar"), "bar")
    self.assertEqual(q["foo"], "bar")
    self.assertEqual(q.getlist("foo"), ["bar"])
    # Encoding order depends on dict ordering; accept either permutation.
    self.assertIn(q.urlencode(), ["foo=bar&name=john", "name=john&foo=bar"])
    q.clear()
    self.assertEqual(len(q), 0)
def test_basic_mutable_operations(self):
    """A QueryDict created with mutable=True supports the full dict API."""
    q = QueryDict(mutable=True)
    q['name'] = 'john'

    # Reads before 'foo' exists.
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.get('name', 'default'), 'john')
    self.assertEqual(q.getlist('name'), ['john'])
    self.assertEqual(q.getlist('foo'), [])

    # Build up the 'foo' value list, then check scalar and list views of it.
    q.setlist('foo', ['bar', 'baz'])
    self.assertEqual(q.get('foo', 'default'), 'baz')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
    q.appendlist('foo', 'another')
    expected_foo = ['bar', 'baz', 'another']
    self.assertEqual(q.getlist('foo'), expected_foo)
    self.assertEqual(q['foo'], 'another')

    # Membership and the various iteration views (order-insensitive).
    self.assertIn('foo', q)
    self.assertCountEqual(q, ['foo', 'name'])
    self.assertCountEqual(q.items(), [('foo', 'another'), ('name', 'john')])
    self.assertCountEqual(q.lists(), [('foo', expected_foo), ('name', ['john'])])
    self.assertCountEqual(q.keys(), ['foo', 'name'])
    self.assertCountEqual(q.values(), ['another', 'john'])

    # update() appends rather than replacing.
    q.update({'foo': 'hello'})
    expected_foo = expected_foo + ['hello']
    self.assertEqual(q['foo'], 'hello')
    self.assertEqual(q.get('foo', 'not available'), 'hello')
    self.assertEqual(q.getlist('foo'), expected_foo)

    # pop()/setdefault() round-trips.
    self.assertEqual(q.pop('foo'), expected_foo)
    self.assertEqual(q.pop('foo', 'not there'), 'not there')
    self.assertEqual(q.get('foo', 'not there'), 'not there')
    self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
    self.assertEqual(q['foo'], 'bar')
    self.assertEqual(q.getlist('foo'), ['bar'])

    # Encoding order depends on dict ordering; accept either permutation.
    self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])

    q.clear()
    self.assertEqual(len(q), 0)
def test_basic_mutable_operations(self):
    """Exercise the mutating QueryDict API under both Python 2 and 3 (via six)."""
    # str("") keeps the literal a native str on both Python versions;
    # .copy() of an immutable QueryDict yields a mutable one.
    q = QueryDict(str("")).copy()
    q["name"] = "john"
    self.assertEqual(q.get("foo", "default"), "default")
    self.assertEqual(q.get("name", "default"), "john")
    self.assertEqual(q.getlist("name"), ["john"])
    self.assertEqual(q.getlist("foo"), [])
    # setlist() replaces the value list; scalar lookup returns the last value.
    q.setlist("foo", ["bar", "baz"])
    self.assertEqual(q.get("foo", "default"), "baz")
    self.assertEqual(q.getlist("foo"), ["bar", "baz"])
    q.appendlist("foo", "another")
    self.assertEqual(q.getlist("foo"), ["bar", "baz", "another"])
    self.assertEqual(q["foo"], "another")
    if not six.PY3:
        # dict.has_key() only exists on Python 2.
        self.assertTrue(q.has_key("foo"))
    self.assertTrue("foo" in q)
    # NOTE(review): these equality checks assume a fixed iteration order of
    # the underlying dict — confirm that holds on the targeted versions.
    self.assertEqual(list(six.iteritems(q)),
                     [("foo", "another"), ("name", "john")])
    self.assertEqual(list(six.iterlists(q)),
                     [("foo", ["bar", "baz", "another"]), ("name", ["john"])])
    self.assertEqual(list(six.iterkeys(q)), ["foo", "name"])
    self.assertEqual(list(six.itervalues(q)), ["another", "john"])
    self.assertEqual(len(q), 2)
    # update() appends to the value list rather than replacing it.
    q.update({"foo": "hello"})
    self.assertEqual(q["foo"], "hello")
    self.assertEqual(q.get("foo", "not available"), "hello")
    self.assertEqual(q.getlist("foo"), ["bar", "baz", "another", "hello"])
    # pop() returns the full value list; a second pop falls back to the default.
    self.assertEqual(q.pop("foo"), ["bar", "baz", "another", "hello"])
    self.assertEqual(q.pop("foo", "not there"), "not there")
    self.assertEqual(q.get("foo", "not there"), "not there")
    self.assertEqual(q.setdefault("foo", "bar"), "bar")
    self.assertEqual(q["foo"], "bar")
    self.assertEqual(q.getlist("foo"), ["bar"])
    self.assertEqual(q.urlencode(), "foo=bar&name=john")
    q.clear()
    self.assertEqual(len(q), 0)
def test_basic_mutable_operations(self):
    """Exercise the mutating API of a mutable copy of an empty QueryDict."""
    q = QueryDict('').copy()  # copies of immutable QueryDicts are mutable
    q['name'] = 'john'
    self.assertEqual(q.get('foo', 'default'), 'default')
    self.assertEqual(q.get('name', 'default'), 'john')
    self.assertEqual(q.getlist('name'), ['john'])
    self.assertEqual(q.getlist('foo'), [])
    q.setlist('foo', ['bar', 'baz'])
    self.assertEqual(q.get('foo', 'default'), 'baz')  # scalar get() -> last value
    self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
    q.appendlist('foo', 'another')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
    self.assertEqual(q['foo'], 'another')
    # BUGFIX: dict.has_key() was removed in Python 3 — use the `in` operator.
    self.assertIn('foo', q)
    # BUGFIX: items()/lists()/keys()/values() return iterators on Python 3
    # and dict ordering is an implementation detail, so compare the views
    # as unordered collections instead of against ordered lists.
    self.assertCountEqual(q.items(), [('foo', 'another'), ('name', 'john')])
    self.assertCountEqual(q.lists(), [('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
    self.assertCountEqual(q.keys(), ['foo', 'name'])
    self.assertCountEqual(q.values(), ['another', 'john'])
    self.assertEqual(len(q), 2)
    # update() appends to the value list; scalar lookup returns the last value.
    q.update({'foo': 'hello'})
    self.assertEqual(q['foo'], 'hello')
    self.assertEqual(q.get('foo', 'not available'), 'hello')
    self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
    self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
    self.assertEqual(q.pop('foo', 'not there'), 'not there')
    self.assertEqual(q.get('foo', 'not there'), 'not there')
    self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
    self.assertEqual(q['foo'], 'bar')
    self.assertEqual(q.getlist('foo'), ['bar'])
    # urlencode() order follows dict ordering; accept either permutation.
    self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
    q.clear()
    self.assertEqual(len(q), 0)
def submit_url(self, params=None, defaults=True):
    """
    Return a form submit url with the given params.

    :params: optional mapping (or QueryDict) of field values to merge over
        the form's default input values; a value of ``None`` removes the
        corresponding default field.
    :defaults: when True, seed the query with the form's ``<input>`` values
        (unchecked checkboxes/radios are skipped, matching browser behavior).
    """
    # BUGFIX: ``params`` previously defaulted to a mutable ``{}``, which is
    # shared across all calls (classic Python pitfall); use a None sentinel.
    if params is None:
        params = {}
    action = self.attr('action')
    values = QueryDict('', mutable=True)
    if defaults:
        for el in self.find('input'):
            if el.get('type') in ('checkbox', 'radio'):
                # Only checked checkboxes/radios are submitted.
                if el.get('checked'):
                    values.appendlist(el.name, el.value or '')
            else:
                values.appendlist(el.name, el.value or '')
    # Overwrite the defaults with the caller-supplied params.
    if isinstance(params, QueryDict):
        values.update(params)
    else:
        for key, val in params.items():
            if val is None:
                del values[key]
            else:
                values[key] = val
    return self.absolute_url(action or '') + '?' + values.urlencode()
class MultiPartParser:
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """

    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.
        """
        # Content-Type should contain multipart and the boundary information.
        content_type = META.get('CONTENT_TYPE', '')
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        try:
            ctypes, opts = parse_header(content_type.encode('ascii'))
        except UnicodeEncodeError:
            raise MultiPartParserError(
                'Invalid non-ASCII Content-Type in multipart: %s' % force_str(content_type))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % force_str(boundary))

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('CONTENT_LENGTH', 0))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, str):
            # The boundary is matched against raw bytes from the stream.
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [
            x.chunk_size for x in upload_handlers if x.chunk_size
        ]
        self._chunk_size = min([2**31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.
        """
        # Imported here to avoid a circular import at module load time.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # A part without a usable content-disposition is skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and
                            settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            # Fall back to the raw bytes on a bad encoding.
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                            num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig(
                            'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_str(file_name, encoding, errors='replace')
                        file_name = self.IE_sanitize(html.unescape(file_name))
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name, file_name, content_type,
                                    content_length, charset, content_type_extra,
                                )
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.
                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(
                                        over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        # Freeze the POST QueryDict before handing it back to the request.
        self._post._mutable = False
        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_str(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
"""
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """

    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        # Prefer the HTTP_-prefixed key, falling back to the plain one.
        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, six.text_type):
            # The boundary is matched against raw bytes from the stream.
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data, self._meta,
                                              self._content_length, self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # A part without a usable content-disposition is skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            # Fall back to the raw bytes on a bad encoding.
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.
                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
class IntelligentUploadHandler(FileUploadHandler):
    """
    An upload handler which overrides the default multipart parser to allow
    simultaneous parsing of fields and files... intelligently. Subclass this
    for real and true awesomeness.
    """
    # NOTE: the previous no-op ``__init__`` that only delegated to super()
    # has been removed — it added nothing.

    def field_parsed(self, field_name, field_value):
        """
        A callback method triggered when a non-file field has been parsed
        successfully by the parser. Use this to listen for new fields being
        parsed.
        """
        pass

    def handle_raw_input(
            self, input_data, META, content_length, boundary, encoding=None):
        """
        Parse the raw input from the HTTP request and split items into fields
        and files, executing callback methods as necessary.

        Shamelessly adapted and borrowed from
        django.http.multiparser.MultiPartParser.

        Returns a ``(POST QueryDict, FILES MultiValueDict)`` tuple.
        """
        # following suit from the source class, this is imported here to avoid
        # a potential circular import
        from django.http import QueryDict

        # create return values
        self.POST = QueryDict('', mutable=True)
        self.FILES = MultiValueDict()

        # initialize the parser and stream
        stream = LazyStream(ChunkIter(input_data, self.chunk_size))
        # whether or not to signal a file-completion at the beginning
        # of the loop.
        old_field_name = None
        counter = 0

        try:
            for item_type, meta_data, field_stream in Parser(stream, boundary):
                if old_field_name:
                    # we run this test at the beginning of the next loop since
                    # we cannot be sure a file is complete until we hit the
                    # next boundary/part of the multipart content.
                    file_obj = self.file_complete(counter)
                    if file_obj:
                        # if we return a file object, add it to the files dict
                        self.FILES.appendlist(force_text(
                            old_field_name, encoding, errors='replace'),
                            file_obj)
                    # wipe it out to prevent havoc
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # this is a POST field
                    if transfer_encoding == "base64":
                        raw_data = field_stream.read()
                        try:
                            # BUGFIX: was ``str(raw_data).decode('base64')``,
                            # a Python 2-only codec that raises AttributeError
                            # on Python 3 and was masked by a bare ``except``.
                            # b64decode raises binascii.Error (a ValueError
                            # subclass) on bad input; fall back to raw bytes.
                            data = base64.b64decode(raw_data)
                        except (TypeError, ValueError):
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self.POST.appendlist(field_name, force_text(
                        data, encoding, errors='replace'))
                    # trigger listener
                    self.field_parsed(field_name, self.POST.get(field_name))
                elif item_type == FILE:
                    # this is a file
                    file_name = disposition.get('filename')

                    if not file_name:
                        continue

                    # transform the file name
                    file_name = force_text(
                        file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get(
                        'content-type', ('',))[0].strip()
                    try:
                        # BUGFIX: narrowed from a bare ``except`` to the
                        # lookup errors a malformed content-type can cause.
                        charset = meta_data.get('content-type', (0, {}))[1]\
                            .get('charset', None)
                    except (IndexError, AttributeError, TypeError):
                        charset = None

                    try:
                        file_content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        file_content_length = None

                    counter = 0

                    # now, do the important file stuff
                    try:
                        # alert on the new file
                        kwargs = {
                            'content_type': content_type,
                            'content_length': file_content_length,
                            'charset': charset}
                        self.new_file(field_name, file_name, **kwargs)

                        # chubber-chunk it
                        for chunk in field_stream:
                            # we need AES compatibles blocks (multiples of 16 bits)
                            over_bytes = len(chunk) % 16
                            if over_bytes:
                                over_chunk =\
                                    field_stream.read(16 - over_bytes)
                                chunk += over_chunk

                            if transfer_encoding == "base64":
                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # since this is only a chunk, any
                                    # error is an unfixable error
                                    raise MultiPartParserError(
                                        "Could not decode base64 data: %r" % e)

                            chunk_length = len(chunk)
                            self.receive_data_chunk(chunk, counter)
                            counter += chunk_length

                            if counter > settings.UPLOAD_FILE_SIZE_LIMIT:
                                raise SkipFile('File is too big.')
                        # ... and we're done
                    except SkipFile:
                        # just eat the rest
                        exhaust(field_stream)
                    else:
                        # handle file upload completions on next iteration
                        old_field_name = field_name

        except StopUpload as e:
            # if we get a request to stop the upload,
            # exhaust it if no con reset
            if not e.connection_reset:
                exhaust(input_data)
        else:
            # make sure that the request data is all fed
            exhaust(input_data)

        # signal the upload has been completed
        self.upload_complete()

        return self.POST, self.FILES

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()
def setup(request):
    """
    PyPI-style ``setup.py register``/``upload`` endpoint: parses the
    multipart/form-data POST body by hand, then creates or updates the
    matching Package/Release and optionally stores an uploaded file.
    """
    if request.method != 'POST':
        raise PermissionDenied(_('Only POST request are allowed'))
    ct_type = request.META.get('CONTENT_TYPE', '')
    # Pull the boundary and charset parameters out of the Content-Type header.
    infos = [x.strip().partition('=') for x in ct_type.split(';')]
    boundary, encoding = None, 'ascii'
    for info in infos:
        if info[0] == 'boundary':
            boundary = info[2]
        elif info[0] == 'charset':
            encoding = info[2]
    if boundary is None:
        raise PermissionDenied(_('Invalid POST form'))
    # parse the POST query by hand
    mid_boundary = ('\n--' + boundary + '\n').encode(encoding)
    end_boundary = ('\n--' + boundary + '--\n').encode(encoding)
    fields = request.body.split(mid_boundary)
    values = QueryDict('', mutable=True, encoding=encoding)
    files = {}
    for part in fields:
        # Each part is "<headers>\n\n<payload>".
        lines = part.split(b'\n\n', 1)
        if len(lines) != 2:
            continue
        infos = [x.strip().partition('=')
                 for x in lines[0].decode(encoding).split(';')]
        key, filename = None, None
        for info in infos:
            if info[0] == 'name':
                key = info[2][1:-1]  # strip the surrounding quotes
            elif info[0] == 'filename':
                filename = info[2][1:-1]
        if key is None:
            continue
        value = lines[1]
        if value.endswith(end_boundary):
            value = value[:-len(end_boundary)]
        if filename is None:
            # NOTE(review): ``value`` is still ``bytes`` here; later
            # comparisons such as ``action in ('submit', 'file_upload')``
            # look type-mismatched on Python 3 — confirm how these values
            # end up decoded in practice.
            values.setlistdefault(key, [])
            values.appendlist(key, value)
        else:
            files[key] = filename, value
    # the POST data are parsed, let's go
    action = values.get(':action')
    if action in ('submit', 'file_upload'):
        package_name = values.get('name', '')
        version_name = values.get('version', '')
        if not package_name or not version_name:
            raise PermissionDenied(_('No package name provided'))
        if request.user.is_anonymous:
            return HttpResponse(ugettext('You must be authenticated'), status=401)
        package, package_created = Package.objects.get_or_create(name=package_name)
        if package_created:
            # The first uploader becomes the owner of the new package.
            PackageRole(package=package, user=request.user,
                        role=PackageRole.OWNER).save()
        elif not request.user.is_superuser:
            if PackageRole.objects.filter(package=package, user=request.user).count() == 0:
                return HttpResponse(ugettext('You are not allowed to update this package'), status=401)
        # Copy the package-level metadata from the submitted form.
        for attr_name in ('name', 'home_page', 'author_email', 'download_url',
                          'author', 'license', 'summary', 'maintainer',
                          'maintainer_email', 'project_url', ):
            if values.get(attr_name):
                setattr(package, attr_name, values.get(attr_name))
        package.save()
        release, created = Release.objects.get_or_create(package=package, version=version_name)
        # NOTE(review): these look like *release*-level fields, yet they are
        # set on ``package`` while ``release`` is the object saved below —
        # likely a copy-paste bug; confirm against the Package/Release models.
        for attr_name in ('stable_version', 'description', 'platform',
                          'keywords', 'docs_url',):
            if values.get(attr_name):
                setattr(package, attr_name, values.get(attr_name))
        release.classifiers.clear()
        for classifier in values.getlist('classifiers', []):
            release.classifiers.add(Classifier.get(classifier))
        # Reset and refill the many-to-many dependency relations.
        for attr_name in ('requires', 'requires_dist', 'provides', 'provides_dist',
                          'obsoletes', 'obsoletes_dist', 'requires_external',
                          'requires_python'):
            getattr(release, attr_name).clear()
            for dep in values.getlist(attr_name, []):
                getattr(release, attr_name).add(Dependence.get(dep))
        release.save()
        if action == 'file_upload':
            if 'content' not in files:
                raise PermissionDenied
            filename, content = files['content']
            # noinspection PyUnboundLocalVariable
            if ReleaseDownload.objects.filter(package=package, release=release,
                                              filename=filename).count() > 0:
                # Refuse to overwrite an already-uploaded distribution file.
                raise PermissionDenied
            # MD5 is what the legacy PyPI upload protocol sends as an
            # integrity check (md5_digest); it is not a security measure.
            md5 = hashlib.md5(content).hexdigest()
            if md5 != values.get('md5_digest'):
                raise PermissionDenied
            download = ReleaseDownload(package=package, release=release, filename=filename)
            path = download.abspath
            path_dirname = os.path.dirname(path)
            if not os.path.isdir(path_dirname):
                os.makedirs(path_dirname)
            with open(path, 'wb') as out_fd:
                out_fd.write(content)
            download.md5_digest = md5
            download.size = len(content)
            download.upload_time = datetime.datetime.utcnow().replace(tzinfo=utc)
            download.url = settings.MEDIA_URL + path[MEDIA_ROOT_LEN:]
            download.file = download.relpath
            download.package_type = PackageType.get(values.get('filetype', 'source'))
            download.comment_text = values.get('comment', '')
            download.python_version = values.get('pyversion')
            download.log()
    template_values = {}
    return TemplateResponse(request, 'pythonnest/simple.html', template_values)
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """

    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META: The standard ``META`` dictionary in Django request objects.
        :input_data: The raw post data, as a file-like object.
        :upload_handler: An UploadHandler instance that performs operations on
            the uploaded data.
        :encoding: The encoding with which to treat the incoming data.

        Raises ``MultiPartParserError`` for a non-multipart Content-Type, a
        missing/invalid boundary, or a negative Content-Length.
        """
        # Content-Type should contain multipart and the boundary information.
        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary that splits the parts
        # (parse_header extracts one component of an HTTP header value).
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, six.text_type):
            # The low-level parser works on bytes; normalize to ascii bytes.
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid;
        # handle content-length == 0 before continuing.
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if the handler will want to take care of the parsing.
        # This allows overriding everything if somebody wants it.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data, self._meta, self._content_length, self._boundary, encoding)
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop since we
                    # cannot be sure a file is complete until we hit the next
                    # boundary/part of the multipart content (updates _files).
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field; we can just set it in the post dict.
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            # FIX(review): was `str(raw_data).decode('base64')`,
                            # a Python-2-only idiom that always raises under
                            # Python 3 (this class uses six / `except ... as e`)
                            # and silently stored the still-encoded text.
                            data = base64.b64decode(raw_data)
                        except Exception:
                            # Undecodable payload: fall back to the raw bytes.
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name, force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    # IE sends the full client-side path; keep the basename only.
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type', ('',))[0].strip()
                    try:
                        charset = meta_data.get('content-type', (0, {}))[1].get('charset', None)
                    except Exception:
                        # Any malformed content-type params → no charset.
                        charset = None

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name, content_type, content_length, charset)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding.
                                # We should always read base64 streams by
                                # multiples of 4.
                                over_bytes = len(chunk) % 4
                                if over_bytes:
                                    over_chunk = field_stream.read(4 - over_bytes)
                                    chunk += over_chunk

                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError("Could not decode base64 data: %r" % e)

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is
                                    # None, then don't continue the chain.
                                    break
                    except SkipFile:
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed.
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        # POST data, plus the files dict populated via handle_file_complete().
        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signalling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(force_text(old_field_name, self._encoding, errors='replace'), file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        # Only the file name is needed, not the client-side directory part.
        return filename and filename[filename.rfind("\\") + 1:].strip()
# NOTE(review): This is a second, older copy of MultiPartParser (Python 2 era:
# `except SkipFile, e` / `except StopUpload, e` syntax, `force_unicode`,
# `str(chunk).decode('base64')`, and a LimitBytes wrapper around the input).
# It duplicates the six-based class elsewhere in this file and is TRUNCATED:
# parse() is cut off mid-method — the final `else:` below has no body — so
# this copy cannot run as-is. Code is kept verbatim; only these review
# comments are added. Presumably kept as a study/reference copy — TODO confirm
# and consider deleting it.
# --- __init__: validates Content-Type is multipart, extracts and validates
# the boundary via parse_header/cgi.valid_boundary, and requires a strictly
# positive Content-Length (this copy rejects 0; the newer copy allows it).
class MultiPartParser(object): """ A rfc2388 multipart/form-data parser. ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``. If """ def __init__(self, META, input_data, upload_handlers, encoding=None): """ Initialize the MultiPartParser object. :META: The standard ``META`` dictionary in Django request objects. :input_data: The raw post data, as a file-like object. :upload_handler: An UploadHandler instance that performs operations on the uploaded data. :encoding: The encoding with which to treat the incoming data. """ # # Content-Type should containt multipart and the boundary information. # content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', '')) if not content_type.startswith('multipart/'): raise MultiPartParserError('Invalid Content-Type: %s' % content_type) # Parse the header to get the boundary to split the parts. ctypes, opts = parse_header(content_type) boundary = opts.get('boundary') if not boundary or not cgi.valid_boundary(boundary): raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary) # # Content-Length should contain the length of the body we are about # to receive. # try: content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0))) except (ValueError, TypeError): # For now set it to 0; we'll try again later on down. content_length = 0 if content_length <= 0: # This means we shouldn't continue...raise an error. raise MultiPartParserError("Invalid content length: %r" % content_length) self._boundary = boundary self._input_data = input_data # For compatibility with low-level network APIs (with 32-bit integers), # the chunk size should be < 2^31, but still divisible by 4. 
# --- __init__ tail + parse() head: chunk-size selection, handler raw-input
# override hook, LimitBytes(content_length) wrapper, and setup of self._post /
# self._files before iterating Parser(stream, boundary).
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size] self._chunk_size = min([2**31-4] + possible_sizes) self._meta = META self._encoding = encoding or settings.DEFAULT_CHARSET self._content_length = content_length self._upload_handlers = upload_handlers def parse(self): """ Parse the POST data and break it into a FILES MultiValueDict and a POST MultiValueDict. Returns a tuple containing the POST and FILES dictionary, respectively. """ # We have to import QueryDict down here to avoid a circular import. from django.http import QueryDict encoding = self._encoding handlers = self._upload_handlers limited_input_data = LimitBytes(self._input_data, self._content_length) # See if the handler will want to take care of the parsing. # This allows overriding everything if somebody wants it. for handler in handlers: result = handler.handle_raw_input(limited_input_data, self._meta, self._content_length, self._boundary, encoding) if result is not None: return result[0], result[1] # Create the data structures to be used later. self._post = QueryDict('', mutable=True) self._files = MultiValueDict() # Instantiate the parser and stream: stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size)) # Whether or not to signal a file-completion at the beginning of the loop. old_field_name = None counters = [0] * len(handlers) try: for item_type, meta_data, field_stream in Parser(stream, self._boundary): if old_field_name: # We run this at the beginning of the next loop # since we cannot be sure a file is complete until # we hit the next boundary/part of the multipart content. 
# --- parse() main loop: per-part dispatch on FIELD vs FILE; FIELD values go
# into self._post, FILE parts are fed chunk-wise to the upload handlers.
# NOTE(review): base64 decoding here uses the Python-2-only
# `str(...).decode('base64')` idiom and bare `except:` clauses.
self.handle_file_complete(old_field_name, counters) old_field_name = None try: disposition = meta_data['content-disposition'][1] field_name = disposition['name'].strip() except (KeyError, IndexError, AttributeError): continue transfer_encoding = meta_data.get('content-transfer-encoding') field_name = force_unicode(field_name, encoding, errors='replace') if item_type == FIELD: # This is a post field, we can just set it in the post if transfer_encoding == 'base64': raw_data = field_stream.read() try: data = str(raw_data).decode('base64') except: data = raw_data else: data = field_stream.read() self._post.appendlist(field_name, force_unicode(data, encoding, errors='replace')) elif item_type == FILE: # This is a file, use the handler... file_name = disposition.get('filename') if not file_name: continue file_name = force_unicode(file_name, encoding, errors='replace') file_name = self.IE_sanitize(unescape_entities(file_name)) content_type = meta_data.get('content-type', ('',))[0].strip() try: charset = meta_data.get('content-type', (0,{}))[1].get('charset', None) except: charset = None try: content_length = int(meta_data.get('content-length')[0]) except (IndexError, TypeError, ValueError): content_length = None counters = [0] * len(handlers) try: for handler in handlers: try: handler.new_file(field_name, file_name, content_type, content_length, charset) except StopFutureHandlers: break for chunk in field_stream: if transfer_encoding == 'base64': # We only special-case base64 transfer encoding try: chunk = str(chunk).decode('base64') except Exception, e: # Since this is only a chunk, any error is an unfixable error. raise MultiPartParserError("Could not decode base64 data: %r" % e) for i, handler in enumerate(handlers): chunk_length = len(chunk) chunk = handler.receive_data_chunk(chunk, counters[i]) counters[i] += chunk_length if chunk is None: # If the chunk received by the handler is None, then don't continue. 
# --- Loop epilogue (TRUNCATED): SkipFile exhausts the current part,
# StopUpload drains the remaining input unless the connection was reset.
# The trailing `else:` is missing its body in this paste.
break except SkipFile, e: # Just use up the rest of this file... exhaust(field_stream) else: # Handle file upload completions on next iteration. old_field_name = field_name else: # If this is neither a FIELD or a FILE, just exhaust the stream. exhaust(stream) except StopUpload, e: if not e.connection_reset: exhaust(limited_input_data) else:
def filter_dict(self):
    """
    Collect this object's terms into a mutable ``QueryDict``.

    Each term is stored under its category's slug; several terms sharing the
    same category accumulate as multiple values for that key (``appendlist``).

    Returns:
        QueryDict: mutable mapping of category slug -> list of term slugs.
    """
    # FIX(review): local was named `dict`, shadowing the builtin; also dropped
    # the redundant intermediate list of (category, slug) tuples.
    filters = QueryDict('', mutable=True)
    # NOTE(review): assumes self.terms is a Django related manager whose
    # items expose .slug and .category.slug — confirm against the model.
    for term in self.terms.all():
        filters.appendlist(term.category.slug, term.slug)
    return filters