Example #1
    def test_initial_call_with_params(self):
        get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
        response = self.client.get(reverse('%s_start' % self.wizard_urlname),
                                   get_params)
        self.assertEqual(response.status_code, 302)

        # Test for proper redirect GET parameters
        location = response['Location']
        self.assertNotEqual(location.find('?'), -1)
        querydict = QueryDict(location[location.find('?') + 1:])
        self.assertEqual(dict(querydict.items()), get_params)
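For reference, the query string in the redirect above can also be extracted with the standard library's urlparse instead of manual string slicing. A minimal sketch, assuming a configured Django settings module and a made-up redirect URL (the QueryDict import path follows the djangocg package used elsewhere in these examples; in stock Django it is django.http):

from urllib.parse import urlparse  # Python 2: from urlparse import urlparse

from djangocg.http import QueryDict

location = '/wizard/start/?getvar1=getval1&getvar2=getval2'  # hypothetical redirect target
querydict = QueryDict(urlparse(location).query)              # immutable, parsed like request.GET
assert dict(querydict.items()) == {'getvar1': 'getval1', 'getvar2': 'getval2'}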
Example #2
        def test_invalid_input_encoding(self):
            """
            QueryDicts must be able to handle invalid input encoding (in this
            case, bad UTF-8 encoding).

            This test doesn't apply under Python 3 because the URL is a string
            and not a bytestring.
            """
            q = QueryDict(str(b'foo=bar&foo=\xff'))
            self.assertEqual(q['foo'], '\ufffd')
            self.assertEqual(q.getlist('foo'), ['bar', '\ufffd'])
Example #3
    def test_immutable_basic_operations(self):
        q = QueryDict(str(''))
        self.assertEqual(q.getlist('foo'), [])
        if not six.PY3:
            self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(list(six.iteritems(q)), [])
        self.assertEqual(list(six.iterlists(q)), [])
        self.assertEqual(list(six.iterkeys(q)), [])
        self.assertEqual(list(six.itervalues(q)), [])
        self.assertEqual(len(q), 0)
        self.assertEqual(q.urlencode(), '')
Example #4
    def test_non_default_encoding(self):
        """#13572 - QueryDict with a non-default encoding"""
        q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
        self.assertEqual(q.encoding, 'iso-8859-15')
        self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
        self.assertEqual(q.urlencode(), 'cur=%A4')
        q = q.copy()
        self.assertEqual(q.encoding, 'iso-8859-15')
        self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
        self.assertEqual(q.urlencode(), 'cur=%A4')
        self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
        self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
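The %A4 byte above decodes to the euro sign only because ISO-8859-15 is given; under the Latin-1 mapping the same byte is the generic currency sign. A quick plain-Python check of that claim:

assert b'\xa4'.decode('iso-8859-15') == '\u20ac'  # the euro sign shown in the test
assert b'\xa4'.decode('iso-8859-1') == '\xa4'     # U+00A4, the generic currency sign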
Example #5
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    # urlparse chokes on lazy objects in Python 3
    login_url_as_str = force_str(login_url or settings.LOGIN_URL)

    login_url_parts = list(urlparse(login_url_as_str))
    if redirect_field_name:
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[redirect_field_name] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

    return HttpResponseRedirect(urlunparse(login_url_parts))
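A minimal usage sketch of the helper above, assuming the default REDIRECT_FIELD_NAME of 'next', a hypothetical login URL, and a configured Django settings module:

response = redirect_to_login('/private/', login_url='/accounts/login/')
assert response.status_code == 302
assert response['Location'] == '/accounts/login/?next=/private/'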
Example #6
    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys."""

        q = QueryDict(str('vote=yes&vote=no'))

        self.assertEqual(q['vote'], 'no')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')

        self.assertEqual(q.get('vote', 'default'), 'no')
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.getlist('vote'), ['yes', 'no'])
        self.assertEqual(q.getlist('foo'), [])

        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])

        if not six.PY3:
            self.assertEqual(q.has_key('vote'), True)
        self.assertEqual('vote' in q, True)
        if not six.PY3:
            self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
        self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
        self.assertEqual(list(six.iterkeys(q)), ['vote'])
        self.assertEqual(list(six.itervalues(q)), ['no'])
        self.assertEqual(len(q), 1)

        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
        self.assertRaises(AttributeError, q.__delitem__, 'vote')
Example #7
    def test_urlencode(self):
        q = QueryDict(str(''), mutable=True)
        q['next'] = '/a&b/'
        self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
        q = QueryDict(str(''), mutable=True)
        q['next'] = '/t\xebst&key/'
        self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
Example #8
    def test_single_key_value(self):
        """Test QueryDict with one key/value pair"""

        q = QueryDict(str('foo=bar'))
        self.assertEqual(q['foo'], 'bar')
        self.assertRaises(KeyError, q.__getitem__, 'bar')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')

        self.assertEqual(q.get('foo', 'default'), 'bar')
        self.assertEqual(q.get('bar', 'default'), 'default')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.getlist('bar'), [])

        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])

        if not six.PY3:
            self.assertTrue(q.has_key('foo'))
        self.assertTrue('foo' in q)
        if not six.PY3:
            self.assertFalse(q.has_key('bar'))
        self.assertFalse('bar' in q)

        self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
        self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
        self.assertEqual(list(six.iterkeys(q)), ['foo'])
        self.assertEqual(list(six.itervalues(q)), ['bar'])
        self.assertEqual(len(q), 1)

        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')

        self.assertEqual(q.urlencode(), 'foo=bar')
Example #9
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from djangocg.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(MultiValueDict(), encoding=self._encoding), MultiValueDict()

        # See if the handler will want to take care of the parsing.
        # This allows overriding everything if somebody wants it.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = str(raw_data).decode('base64')
                        except:
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type', ('',))[0].strip()
                    try:
                        charset = meta_data.get('content-type', (0,{}))[1].get('charset', None)
                    except:
                        charset = None

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError("Could not decode base64 data: %r" % e)

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files
Example #10
class MultiPartParser(object):
    """
    An RFC 2388 multipart/form-data parser.

    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)


        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2**31-4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from djangocg.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(MultiValueDict(), encoding=self._encoding), MultiValueDict()

        # See if the handler will want to take care of the parsing.
        # This allows overriding everything if somebody wants it.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = str(raw_data).decode('base64')
                        except:
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type', ('',))[0].strip()
                    try:
                        charset = meta_data.get('content-type', (0,{}))[1].get('charset', None)
                    except:
                        charset = None

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError("Could not decode base64 data: %r" % e)

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signalling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(force_text(old_field_name,
                                                     self._encoding,
                                                     errors='replace'),
                                       file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\")+1:].strip()
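For orientation, a rough sketch of driving the parser by hand. The multipart body, META keys, and empty handler list below are illustrative assumptions (a real request supplies request.META, the WSGI input stream, and request.upload_handlers), and a configured Django settings module is assumed; in stock Django the class lives in django.http.multipartparser:

import io

body = (b'--BoUnDaRy\r\n'
        b'Content-Disposition: form-data; name="vote"\r\n'
        b'\r\n'
        b'yes\r\n'
        b'--BoUnDaRy--\r\n')
meta = {
    'CONTENT_TYPE': 'multipart/form-data; boundary=BoUnDaRy',
    'CONTENT_LENGTH': str(len(body)),
}
parser = MultiPartParser(meta, io.BytesIO(body), upload_handlers=[], encoding='utf-8')
post, files = parser.parse()  # post is a QueryDict, files is a MultiValueDict
assert post.getlist('vote') == ['yes'] and len(files) == 0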
Example #11
    def test_remote_login_url_with_next_querystring(self):
        login_url = "http://remote.example.com/login/"
        login_required_url = self.get_login_required_url("%s?next=/default/" % login_url)
        querystring = QueryDict("", mutable=True)
        querystring["next"] = "http://testserver/login_required/"
        self.assertEqual(login_required_url, "%s?%s" % (login_url, querystring.urlencode("/")))
Example #12
    def test_login_url_with_querystring(self):
        login_url = "/login/?pretty=1"
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict("pretty=1", mutable=True)
        querystring["next"] = "/login_required/"
        self.assertEqual(login_required_url, "http://testserver/login/?%s" % querystring.urlencode("/"))
Example #13
    def test_https_login_url(self):
        login_url = "https:///login/"
        login_required_url = self.get_login_required_url(login_url)
        querystring = QueryDict("", mutable=True)
        querystring["next"] = "http://testserver/login_required/"
        self.assertEqual(login_required_url, "%s?%s" % (login_url, querystring.urlencode("/")))
Example #14
    def test_immutable_get_with_default(self):
        q = QueryDict(str(''))
        self.assertEqual(q.get('foo', 'default'), 'default')
Example #15
    def test_update_from_querydict(self):
        """Regression test for #8278: QueryDict.update(QueryDict)"""
        x = QueryDict(str("a=1&a=2"), mutable=True)
        y = QueryDict(str("a=3&a=4"))
        x.update(y)
        self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
Example #16
    def test_basic_mutable_operations(self):
        q = QueryDict(str('')).copy()
        q['name'] = 'john'
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.get('name', 'default'), 'john')
        self.assertEqual(q.getlist('name'), ['john'])
        self.assertEqual(q.getlist('foo'), [])

        q.setlist('foo', ['bar', 'baz'])
        self.assertEqual(q.get('foo', 'default'), 'baz')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz'])

        q.appendlist('foo', 'another')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
        self.assertEqual(q['foo'], 'another')
        if not six.PY3:
            self.assertTrue(q.has_key('foo'))
        self.assertTrue('foo' in q)

        self.assertEqual(list(six.iteritems(q)),  [('foo', 'another'), ('name', 'john')])
        self.assertEqual(list(six.iterlists(q)), [('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
        self.assertEqual(list(six.iterkeys(q)), ['foo', 'name'])
        self.assertEqual(list(six.itervalues(q)), ['another', 'john'])
        self.assertEqual(len(q), 2)

        q.update({'foo': 'hello'})
        self.assertEqual(q['foo'], 'hello')
        self.assertEqual(q.get('foo', 'not available'), 'hello')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
        self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
        self.assertEqual(q.pop('foo', 'not there'), 'not there')
        self.assertEqual(q.get('foo', 'not there'), 'not there')
        self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.urlencode(), 'foo=bar&name=john')

        q.clear()
        self.assertEqual(len(q), 0)