Code Example #1
    def formfield_for_dbfield(self, db_field, **kwargs):
        request = kwargs.pop("request", None)

        # Add a select field of available commands
        if db_field.name == 'command':
            choices_dict = MultiValueDict()
            for command, app in get_kitsune_checks():
                choices_dict.appendlist(app, command)

            choices = []
            for key in choices_dict.keys():
                commands = choices_dict.getlist(key)
                commands.sort()
                choices.append([key, [[c, c] for c in commands]])

            kwargs['widget'] = forms.widgets.Select(choices=choices)
            return db_field.formfield(**kwargs)
        kwargs['request'] = request
        return super(JobAdmin, self).formfield_for_dbfield(db_field, **kwargs)
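For reference, here is a minimal standalone sketch (with hypothetical command names standing in for get_kitsune_checks()) of the grouped-choices structure that appendlist builds above:

    from django.utils.datastructures import MultiValueDict

    # Hypothetical (command, app) pairs.
    pairs = [('kitsune_check_disk', 'kitsune'), ('kitsune_check_ram', 'kitsune')]

    choices_dict = MultiValueDict()
    for command, app in pairs:
        choices_dict.appendlist(app, command)  # one list of commands per app

    # Same shape the admin method passes to forms.widgets.Select.
    choices = [[app, [[c, c] for c in sorted(choices_dict.getlist(app))]]
               for app in choices_dict]
    # choices == [['kitsune', [['kitsune_check_disk', 'kitsune_check_disk'],
    #                          ['kitsune_check_ram', 'kitsune_check_ram']]]]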
Code Example #2
 def test_multivaluedict(self):
     d = MultiValueDict({
         'name': ['Adrian', 'Simon'],
         'position': ['Developer']
     })
     self.assertEqual(d['name'], 'Simon')
     self.assertEqual(d.get('name'), 'Simon')
     self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
     self.assertEqual(sorted(d.items()), [('name', 'Simon'),
                                          ('position', 'Developer')])
     self.assertEqual(sorted(d.lists()), [('name', ['Adrian', 'Simon']),
                                          ('position', ['Developer'])])
     with self.assertRaises(MultiValueDictKeyError) as cm:
         d.__getitem__('lastname')
     self.assertEqual(str(cm.exception), "'lastname'")
     self.assertIsNone(d.get('lastname'))
     self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
     self.assertEqual(d.getlist('lastname'), [])
     self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
                      ['Adrian', 'Simon'])
     d.setlist('lastname', ['Holovaty', 'Willison'])
     self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
     self.assertEqual(sorted(d.values()),
                      ['Developer', 'Simon', 'Willison'])
Code Example #3
File: http.py Project: xyxzfj/django
    def test_urlencode(self):
        # 2-tuples (the norm)
        result = http.urlencode((('a', 1), ('b', 2), ('c', 3)))
        self.assertEqual(result, 'a=1&b=2&c=3')

        # A dictionary
        result = http.urlencode({'a': 1, 'b': 2, 'c': 3})
        acceptable_results = [
            # Need to allow all of these as dictionaries have to be treated as
            # unordered
            'a=1&b=2&c=3',
            'a=1&c=3&b=2',
            'b=2&a=1&c=3',
            'b=2&c=3&a=1',
            'c=3&a=1&b=2',
            'c=3&b=2&a=1'
        ]
        self.assertTrue(result in acceptable_results)
        result = http.urlencode({'a': [1, 2]}, doseq=False)
        self.assertEqual(result, 'a=%5B%271%27%2C+%272%27%5D')
        result = http.urlencode({'a': [1, 2]}, doseq=True)
        self.assertEqual(result, 'a=1&a=2')
        result = http.urlencode({'a': []}, doseq=True)
        self.assertEqual(result, '')

        # A MultiValueDict
        result = http.urlencode(MultiValueDict({
            'name': ['Adrian', 'Simon'],
            'position': ['Developer']
        }), doseq=True)
        acceptable_results = [
            # MultiValueDicts are similarly unordered
            'name=Adrian&name=Simon&position=Developer',
            'position=Developer&name=Adrian&name=Simon'
        ]
        self.assertTrue(result in acceptable_results)
Code Example #4
    def test_get_4v4(self):
        p1 = self.db.create_player(region=Region.EU, realm=1, bid=301)
        p2 = self.db.create_player(region=Region.EU, realm=1, bid=302)
        p3 = self.db.create_player(region=Region.EU, realm=1, bid=303)
        p4 = self.db.create_player(region=Region.EU, realm=1, bid=304)
        t = self.db.create_team(mode=Mode.TEAM_4V4,
                                member0=p1,
                                member1=p2,
                                member2=p3,
                                member3=p4)

        qp = MultiValueDict()
        qp.setlist('player', [
            'http://eu.battle.net/sc2/en/profile/304/1/xyz',
            'http://eu.battle.net/sc2/en/profile/303/1/xyz',
            'http://eu.battle.net/sc2/en/profile/302/1/xyz',
            'http://eu.battle.net/sc2/en/profile/301/1/xyz',
        ])
        qp['mode'] = 'team-4v4'
        response = self.c.get('/team/id/', qp)

        self.assertEqual(200, response.status_code)
        data = json.loads(response.content.decode('utf-8'))
        self.assertEqual(t.id, data['team_id'])
Code Example #5
File: middleware.py Project: FGlazov/il2_stats
    def middleware(request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.

        tour_id = request.GET.get('tour')
        if tour_id:
            try:
                request.tour = Tour.objects.get(id=tour_id)
            except Tour.DoesNotExist:
                params = MultiValueDict(request.GET)
                del params['tour']
                return redirect('{url}?{params}'.format(url=request.path, params=urlencode(query=params, doseq=1)))
        else:
            try:
                request.tour = Tour.objects.get_or_create(is_ended=False)[0]
            except Tour.MultipleObjectsReturned:
                request.tour = Tour.objects.filter(is_ended=False).order_by('-id')[0]

        response = get_response(request)

        # Code to be executed for each request/response after
        # the view is called.

        return response
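The copy-delete-reencode pattern above works because request.GET is immutable while a plain MultiValueDict copy is not. A minimal sketch with hypothetical parameters:

    from django.utils.datastructures import MultiValueDict
    from django.utils.http import urlencode

    params = MultiValueDict({'tour': ['3'], 'page': ['2', '4']})  # mutable copy
    del params['tour']
    # doseq re-expands the remaining value lists into repeated keys.
    assert urlencode(query=params, doseq=1) == 'page=2&page=4'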
Code Example #6
File: resolvers.py Project: sureshmarri90/django-1
 def _populate(self):
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     if getattr(self._local, 'populating', False):
         return
     try:
         self._local.populating = True
         lookups = MultiValueDict()
         namespaces = {}
         apps = {}
         language_code = get_language()
         for url_pattern in reversed(self.url_patterns):
             p_pattern = url_pattern.pattern.regex.pattern
             if p_pattern.startswith('^'):
                 p_pattern = p_pattern[1:]
             if isinstance(url_pattern, URLPattern):
                 self._callback_strs.add(url_pattern.lookup_str)
                 bits = normalize(url_pattern.pattern.regex.pattern)
                 lookups.appendlist(
                     url_pattern.callback,
                     (bits, p_pattern, url_pattern.default_args,
                      url_pattern.pattern.converters))
                 if url_pattern.name is not None:
                     lookups.appendlist(
                         url_pattern.name,
                         (bits, p_pattern, url_pattern.default_args,
                          url_pattern.pattern.converters))
             else:  # url_pattern is a URLResolver.
                 url_pattern._populate()
                 if url_pattern.app_name:
                     apps.setdefault(url_pattern.app_name,
                                     []).append(url_pattern.namespace)
                     namespaces[url_pattern.namespace] = (p_pattern,
                                                          url_pattern)
                 else:
                     for name in url_pattern.reverse_dict:
                         for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(
                                 name):
                             new_matches = normalize(p_pattern + pat)
                             lookups.appendlist(
                                 name, (new_matches, p_pattern + pat, {
                                     **defaults,
                                     **url_pattern.default_kwargs
                                 }, {
                                     **self.pattern.converters,
                                     **url_pattern.pattern.converters,
                                     **converters
                                 }))
                     for namespace, (
                             prefix, sub_pattern
                     ) in url_pattern.namespace_dict.items():
                         current_converters = url_pattern.pattern.converters
                         sub_pattern.pattern.converters.update(
                             current_converters)
                         namespaces[namespace] = (p_pattern + prefix,
                                                  sub_pattern)
                     for app_name, namespace_list in url_pattern.app_dict.items(
                     ):
                         apps.setdefault(app_name,
                                         []).extend(namespace_list)
                 self._callback_strs.update(url_pattern._callback_strs)
         self._namespace_dict[language_code] = namespaces
         self._app_dict[language_code] = apps
         self._reverse_dict[language_code] = lookups
         self._populated = True
     finally:
         self._local.populating = False
Code Example #7
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.
        """
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # The HTTP spec says that Content-Length >= 0 is valid, so handle
        # content-length == 0 before continuing.
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                            and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                            num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                            and num_bytes_read >
                            settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig(
                            'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding,
                                              errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_str(file_name,
                                              encoding,
                                              errors='replace')
                        file_name = self.IE_sanitize(html.unescape(file_name))
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name,
                                    file_name,
                                    content_type,
                                    content_length,
                                    charset,
                                    content_type_extra,
                                )
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 -
                                                                   remaining)
                                    stripped_chunk += b"".join(
                                        over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        self._post._mutable = False
        return self._post, self._files
Code Example #8
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # The HTTP spec says that Content-Length >= 0 is valid, so handle
        # content-length == 0 before continuing.
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files
Code Example #9
File: test_datastructures.py Project: zw3n/django
 def test_getlist_doesnt_mutate(self):
     x = MultiValueDict({'a': ['1', '2'], 'b': ['3']})
     values = x.getlist('a')
     values += x.getlist('b')
     self.assertEqual(x.getlist('a'), ['1', '2'])
Code Example #10
File: test_datastructures.py Project: zw3n/django
 def test_getlist_none_empty_values(self):
     x = MultiValueDict({'a': None, 'b': []})
     self.assertIsNone(x.getlist('a'))
     self.assertEqual(x.getlist('b'), [])
Code Example #11
File: image_storage.py Project: Arthur264/rest_blog
 def _save(self, name, content):
     data = client.upload(MultiValueDict({"image": [content]}))
     return data['data']['link']
Code Example #12
 def test_exclude_fields_comma_separated_from_context(self):
     param_dict = {'exclude_fields': ['a,b']}
     self.mock_request.query_params = MultiValueDict(param_dict)
     serializer = self.serializer(context=self.context)
     self.assertEqual(['c'], serializer.fields.keys())
Code Example #13
 def test_exclude_bad_fields_from_context(self):
     param_dict = {'fields': [], 'exclude_fields': ['b', 'd']}
     self.mock_request.query_params = MultiValueDict(param_dict)
     self.assertRaises(FieldError, self.serializer, context=self.context)
Code Example #14
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        limited_input_data = LimitBytes(self._input_data, self._content_length)

        # See if the handler will want to take care of the parsing.
        # This allows overriding everything if somebody wants it.
        for handler in handlers:
            result = handler.handle_raw_input(limited_input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                field_name = force_unicode(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = str(raw_data).decode('base64')
                        except:
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_unicode(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_unicode(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type', ('',))[0].strip()
                    try:
                        charset = meta_data.get('content-type', (0,{}))[1].get('charset', None)
                    except:
                        charset = None

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                try:
                                    chunk = str(chunk).decode('base64')
                                except Exception, e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError("Could not decode base64 data: %r" % e)

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile, e:
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD nor a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload, e:
            if not e.connection_reset:
                exhaust(limited_input_data)
Code Example #15
def test_addons_form():
    f = forms.AddonsForm(MultiValueDict({'addon': [''],
                                         'addon_comment': ['comment']}))
    assert f.is_valid()
Code Example #16
File: message.py Project: yjstyle/reviewboard
def prepare_base_review_request_mail(user,
                                     review_request,
                                     subject,
                                     in_reply_to,
                                     to_field,
                                     cc_field,
                                     template_name_base,
                                     context=None,
                                     extra_headers=None):
    """Return a customized review request e-mail.

    This is intended to be called by one of the ``prepare_{type}_mail``
    functions in this file. This method builds up a common context that all
    review request-related e-mails will use to render their templates, as well
    as handling user preferences regarding e-mail and adding additional
    headers.

    Args:
        user (django.contrib.auth.models.User):
            The user who is sending the e-mail.

        review_request (reviewboard.reviews.models.review_request.ReviewRequest):
            The review request this e-mail is regarding.

        subject (unicode):
            The e-mail subject line.

        in_reply_to (unicode):
            The e-mail message ID this message is in response to or ``None``.

        to_field (set):
            The set of :py:class:`~django.contrib.auth.models.User` and
            :py:class:`~reviewboard.reviews.models.group.Group` objects that
            this e-mail will be sent to.

        cc_field (set):
            The set of :py:class:`~django.contrib.auth.models.User` and
            :py:class:`~reviewboard.reviews.models.group.Group` objects to be
            CC'ed on the e-mail.

        template_name_base (unicode):
            The name of the template to use to generate the e-mail without its
            extension. The plain-text version of the e-mail will append
            ``.txt`` to this and the rich-text version of the e-mail will
            append ``.html``.

        context (dict, optional):
            Optional additional template rendering context.

        extra_headers (dict, optional):
            Optional additional headers to include.

    Returns:
        EmailMessage:
        The prepared e-mail message.
    """
    user_email = build_email_address_for_user(user)
    to_field = recipients_to_addresses(to_field, review_request.id)
    cc_field = recipients_to_addresses(cc_field, review_request.id) - to_field

    if not user.should_send_own_updates():
        to_field.discard(user_email)
        cc_field.discard(user_email)

    if not to_field and not cc_field:
        # This e-mail would have no recipients, so we won't send it.
        return None

    if not context:
        context = {}

    context.update({
        'user': user,
        'site_url': get_server_url(),
        'review_request': review_request,
    })
    local_site = review_request.local_site

    if local_site:
        context['local_site_name'] = local_site.name

    text_body = render_to_string('%s.txt' % template_name_base, context)
    html_body = render_to_string('%s.html' % template_name_base, context)
    server_url = get_server_url(local_site=local_site)

    headers = MultiValueDict({
        'X-ReviewBoard-URL': [server_url],
        'X-ReviewRequest-URL': [
            build_server_url(review_request.get_absolute_url(),
                             local_site=local_site)
        ],
        'X-ReviewGroup': [
            ', '.join(
                review_request.target_groups.values_list('name', flat=True))
        ],
    })

    if extra_headers:
        if not isinstance(extra_headers, MultiValueDict):
            extra_headers = MultiValueDict(
                (key, [value]) for key, value in six.iteritems(extra_headers))

        headers.update(extra_headers)

    if review_request.repository:
        headers['X-ReviewRequest-Repository'] = review_request.repository.name

    latest_diffset = review_request.get_latest_diffset()

    if latest_diffset:
        modified_files = set()

        for filediff in latest_diffset.files.all():
            if filediff.deleted or filediff.copied or filediff.moved:
                modified_files.add(filediff.source_file)

            if filediff.is_new or filediff.copied or filediff.moved:
                modified_files.add(filediff.dest_file)

        # The following code segment deals with the case where the client adds
        # a significant amount of files with large names. We limit the number
        # of headers; when more than 8192 characters are reached, we stop
        # adding filename headers.
        current_header_length = 0

        for filename in modified_files:
            current_header_length += (HEADER_ADDITIONAL_CHARACTERS_LENGTH +
                                      len(filename))

            if current_header_length > MAX_FILENAME_HEADERS_LENGTH:
                logging.warning(
                    'Unable to store all filenames in the '
                    'X-ReviewBoard-Diff-For headers when sending e-mail for '
                    'review request %s: The header size exceeds the limit of '
                    '%s. Remaining headers have been omitted.',
                    review_request.display_id, MAX_FILENAME_HEADERS_LENGTH)
                break

            headers.appendlist('X-ReviewBoard-Diff-For', filename)

    if settings.DEFAULT_FROM_EMAIL:
        sender = build_email_address(full_name=user.get_full_name(),
                                     email=settings.DEFAULT_FROM_EMAIL)
    else:
        sender = None

    return EmailMessage(subject=subject.strip(),
                        text_body=text_body.encode('utf-8'),
                        html_body=html_body.encode('utf-8'),
                        from_email=user_email,
                        sender=sender,
                        to=list(to_field),
                        cc=list(cc_field),
                        in_reply_to=in_reply_to,
                        headers=headers)
Code Example #17
 def __init__(self, query_params=None, method="GET"):
     # Avoid a shared mutable default argument: build a fresh
     # MultiValueDict for each instance instead.
     self.query_params = MultiValueDict() if query_params is None else query_params
     self.method = method
Code Example #18
File: tests.py Project: bliuredhat/PDC
 def test_both_init_and_context(self):
     param_dict = {'fields': ['a'], 'exclude_fields': ['b']}
     self.mock_request.query_params = MultiValueDict(param_dict)
     serializer = self.serializer(fields=['c'], exclude_fields=['b'],
                                  context=self.context)
     self.assertEqual(['a', 'c'], serializer.fields.keys())
Code Example #19
File: test_datastructures.py Project: zw3n/django
 def test_getlist_default(self):
     x = MultiValueDict({'a': [1]})
     MISSING = object()
     values = x.getlist('b', default=MISSING)
     self.assertIs(values, MISSING)
Code Example #20
    def test_get_value_multi_dictionary_full(self):
        mvd = MultiValueDict({'foo': ['bar1', 'bar2']})
        assert ['bar1', 'bar2'] == self.field.get_value(mvd)

        mvd = MultiValueDict({'baz': ['bar1', 'bar2']})
        assert [] == self.field.get_value(mvd)
Code Example #21
File: test_datastructures.py Project: zw3n/django
 def test_appendlist(self):
     d = MultiValueDict()
     d.appendlist('name', 'Adrian')
     d.appendlist('name', 'Simon')
     self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
Code Example #22
    def test_manytomany_filter_multiple(self):
        qs = Book.objects.all()

        # Specific example - multiple filtering
        emily = Author.objects.get(name='Emily Brontë')
        charlotte = Author.objects.get(name='Charlotte Brontë')
        anne = Author.objects.get(name='Anne Brontë')

        # If we select 'emily' as an author:

        data = MultiValueDict({'authors': [str(emily.pk)]})
        with self.assertNumQueries(1):
            # 1 query for all chosen objects
            filter1 = ManyToManyFilter('authors', Book, data)

        with self.assertNumQueries(0):
            # This shouldn't need to do any more queries
            qs_emily = filter1.apply_filter(qs)

        # ...we should get a qs that includes Poems and Wuthering Heights.
        self.assertTrue(qs_emily.filter(name='Poems').exists())
        self.assertTrue(qs_emily.filter(name='Wuthering Heights').exists())
        # ...and excludes Jane Eyre
        self.assertFalse(qs_emily.filter(name='Jane Eyre').exists())

        with self.assertNumQueries(2):
            # 0 query for all chosen objects (already done)
            # 1 query for available objects
            # 1 query for counts
            choices = filter1.get_choices(qs_emily)

        # We should have a 'choices' that includes charlotte and anne
        self.assertTrue(
            text_type(anne) in
            [c.label for c in choices if c.link_type is FILTER_ADD])
        self.assertTrue(
            text_type(charlotte) in
            [c.label for c in choices if c.link_type is FILTER_ADD])

        # ... but not emily, because that is obvious and boring
        self.assertTrue(
            text_type(emily) not in
            [c.label for c in choices if c.link_type is FILTER_ADD])
        # emily should be in 'remove' links, however.
        self.assertTrue(
            text_type(emily) in
            [c.label for c in choices if c.link_type is FILTER_REMOVE])

        # Select again - should have sensible params
        anne_choice = [c for c in choices if c.label.startswith('Anne')][0]
        self.assertTrue(
            text_type(emily.pk) in anne_choice.params.getlist('authors'))
        self.assertTrue(
            text_type(anne.pk) in anne_choice.params.getlist('authors'))

        # Now do the second select:
        filter2 = ManyToManyFilter('authors', Book, anne_choice.params)

        qs_emily_anne = filter2.apply_filter(qs)

        # ...we should get a qs that includes Poems
        self.assertTrue(qs_emily_anne.filter(name='Poems').exists())
        # ... but not Wuthering Heights
        self.assertFalse(
            qs_emily_anne.filter(name='Wuthering Heights').exists())

        # The choices should contain just Emily and Anne to remove, and
        # Charlotte should have 'link_type' FILTER_ADD. Even though it
        # is the only choice, adding the choice is not necessarily the same as
        # not adding it (could have books by Emily and Anne, but not Charlotte)
        choices = filter2.get_choices(qs_emily_anne)
        self.assertEqual([(c.label, c.link_type) for c in choices],
                         [(text_type(emily), FILTER_REMOVE),
                          (text_type(anne), FILTER_REMOVE),
                          (text_type(charlotte), FILTER_ADD)])
Code Example #23
def _add_form_data(form_data, extra_data):
    # Wrap each extra value in a single-item list to match MultiValueDict's
    # list-per-key storage; keys already present in form_data win the merge.
    return MultiValueDict({
        **{k: [v] for k, v in extra_data.items()},
        **form_data,
    })
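A quick usage sketch (hypothetical keys, and assuming form_data is a plain dict of value lists): each extra_data value is wrapped in a single-item list, and keys already present in form_data take precedence:

    merged = _add_form_data(
        form_data={'name': ['Adrian', 'Simon']},
        extra_data={'name': 'ignored', 'token': 'abc'},
    )
    assert merged.getlist('name') == ['Adrian', 'Simon']
    assert merged.getlist('token') == ['abc']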
Code Example #24
 def test_datetime_filter_invalid_query(self):
     self.do_invalid_query_param_test(
         lambda params: DateTimeFilter(
             'date_published', Book, params, max_links=10),
         MultiValueDict({'date_published': ['1818xx']}))
Code Example #25
File: request.py Project: 1455364690/meituan
 def _mark_post_parse_error(self):
     self._post = QueryDict()
     self._files = MultiValueDict()
Code Example #26
    def test_datetime_filter_remove_broad(self):
        """
        If we remove a broader choice (e.g. year), the more specific choices
        (e.g. day) should be removed too.
        """
        # This should hold whichever order the params are defined:
        params1 = MultiValueDict({
            'date_published': [
                '1818-08-24', '1818-08-24..1818-08-30', '1818-08',
                '1818-08..1818-10', '1818..1819', '1818'
            ]
        })
        params2 = MultiValueDict({
            'date_published': [
                '1818..1819',
                '1818',
                '1818-08..1818-10',
                '1818-08',
                '1818-08-24..1818-08-30',
                '1818-08-24',
            ]
        })

        for p in [params1, params2]:
            f = DateTimeFilter('date_published', Book, p)
            qs = Book.objects.all()
            qs_filtered = f.apply_filter(qs)
            choices = f.get_choices(qs_filtered)

            # First choice should be for '1818-1819' and remove all 'date_published'
            self.assertEqual(choices[0].label, '1818-1819')
            self.assertEqual(choices[0].link_type, FILTER_REMOVE)
            self.assertEqual(choices[0].params.getlist('date_published'), [])

            self.assertEqual(choices[1].link_type, FILTER_REMOVE)
            self.assertEqual(choices[1].params.getlist('date_published'),
                             ['1818..1819'])

            self.assertEqual(choices[2].link_type, FILTER_REMOVE)
            self.assertEqual(choices[2].params.getlist('date_published'), [
                '1818..1819',
                '1818',
            ])

            self.assertEqual(choices[3].link_type, FILTER_REMOVE)
            self.assertEqual(choices[3].params.getlist('date_published'), [
                '1818..1819',
                '1818',
                '1818-08..1818-10',
            ])

            self.assertEqual(choices[4].link_type, FILTER_REMOVE)
            self.assertEqual(choices[4].params.getlist('date_published'), [
                '1818..1819',
                '1818',
                '1818-08..1818-10',
                '1818-08',
            ])

            self.assertEqual(choices[5].link_type, FILTER_REMOVE)
            self.assertEqual(choices[5].params.getlist('date_published'), [
                '1818..1819',
                '1818',
                '1818-08..1818-10',
                '1818-08',
                '1818-08-24..1818-08-30',
            ])
Code Example #27
 def test_image_form_validation(self):
     data = {'apk': self.album.pk}
     image_path = os.path.join(settings.MEDIA_ROOT, self.image_filenames[0])
     image_files = MultiValueDict({'data': [image_path]})
     form = ImageCreateForm(data, files=image_files)
     form.clean()
Code Example #28
 def GET(self, **kwargs):
     from django.utils.datastructures import MultiValueDict
     return MultiValueDict(
         dict(("filter-%s" % k, v) for k,v in kwargs.items()))
Code Example #29
def test_translate_results(_1, _2):
    query_params = MultiValueDict(
        {
            "groupBy": ["session.status"],
            "field": [
                "sum(session)",
                "max(session.duration)",
                "p50(session.duration)",
                "p95(session.duration)",
            ],
            "interval": ["1d"],
            "statsPeriod": ["2d"],
        }
    )
    query_definition = QueryDefinition(query_params)

    intervals = list(get_intervals(query_definition))
    results = {
        "metrics_counters": {
            "totals": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,  # session.status:healthy
                        "value": 300,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 0,  # session.status:abnormal
                        "value": 330,
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "value": 100,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "value": 110,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "value": 200,
                    },
                    {
                        "metric_id": 9,  # session
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "value": 220,
                    },
                ],
            },
        },
        "metrics_distributions": {
            "totals": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "max": 123.4,
                        "percentiles": [1, 2, 3, 4, 5],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 0,
                        "max": 456.7,
                        "percentiles": [1.5, 2.5, 3.5, 4.5, 5.5],
                    },
                ],
            },
            "series": {
                "data": [
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "max": 10.1,
                        "percentiles": [1.1, 2.1, 3.1, 4.1, 5.1],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 24, tzinfo=pytz.utc),
                        "max": 20.2,
                        "percentiles": [1.2, 2.2, 3.2, 4.2, 5.2],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 4,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "max": 30.3,
                        "percentiles": [1.3, 2.3, 3.3, 4.3, 5.3],
                    },
                    {
                        "metric_id": 7,  # session.duration
                        "tags[8]": 0,
                        "bucketed_time": datetime(2021, 8, 25, tzinfo=pytz.utc),
                        "max": 40.4,
                        "percentiles": [1.4, 2.4, 3.4, 4.4, 5.4],
                    },
                ],
            },
        },
    }

    assert SnubaResultConverter(1, query_definition, intervals, results).translate_results() == [
        {
            "by": {"session.status": "healthy"},
            "totals": {
                "sum(session)": 300,
                "max(session.duration)": 123.4,
                "p50(session.duration)": 1,
                "p95(session.duration)": 4,
            },
            "series": {
                "sum(session)": [100, 200],
                "max(session.duration)": [10.1, 30.3],
                "p50(session.duration)": [1.1, 1.3],
                "p95(session.duration)": [4.1, 4.3],
            },
        },
        {
            "by": {"session.status": "abnormal"},
            "totals": {
                "sum(session)": 330,
                "max(session.duration)": 456.7,
                "p50(session.duration)": 1.5,
                "p95(session.duration)": 4.5,
            },
            "series": {
                "sum(session)": [110, 220],
                "max(session.duration)": [20.2, 40.4],
                "p50(session.duration)": [1.2, 1.4],
                "p95(session.duration)": [4.2, 4.4],
            },
        },
    ]
Code Example #30
    def partial_update(self, request, *args, **kwargs):

        # PATCHED: Validate upload session url kwarg
        upload_session_id = kwargs.get('upload_session_id')
        try:
            UploadSession.objects.get(pk=upload_session_id)
        except (UploadSession.DoesNotExist, ValidationError):
            return Response('Wrong upload session uid.',
                            status=status.HTTP_400_BAD_REQUEST)

        # Validate tus header
        if not has_required_tus_header(request):
            return Response('Missing "{}" header.'.format('Tus-Resumable'),
                            status=status.HTTP_400_BAD_REQUEST)

        # Validate content type
        if not self._is_valid_content_type(request):
            return Response(
                'Invalid value for "Content-Type" header: {}. Expected "{}".'.
                format(request.META['CONTENT_TYPE'],
                       TusUploadStreamParser.media_type),
                status=status.HTTP_400_BAD_REQUEST)

        # Retrieve object
        upload = self.get_object()

        # Get upload_offset
        upload_offset = getattr(request, constants.UPLOAD_OFFSET_NAME)

        # Validate upload_offset
        if upload_offset != upload.upload_offset:
            raise Conflict

        # Make sure there is a tempfile for the upload
        assert upload.get_or_create_temporary_file()

        # Change state
        if upload.state == states.INITIAL:
            upload.start_receiving()
            upload.save()

        # Get chunk from request
        chunk_bytes = self.get_chunk(request)

        # Check for data
        if not chunk_bytes:
            return Response('No data.', status=status.HTTP_400_BAD_REQUEST)

        # Check checksum  (http://tus.io/protocols/resumable-upload.html#checksum)
        upload_checksum = getattr(request,
                                  constants.UPLOAD_CHECKSUM_FIELD_NAME, None)
        if upload_checksum is not None:
            if upload_checksum[0] not in tus_api_checksum_algorithms:
                return Response('Unsupported Checksum Algorithm: {}.'.format(
                    upload_checksum[0]),
                                status=status.HTTP_400_BAD_REQUEST)
            elif not checksum_matches(upload_checksum[0], upload_checksum[1],
                                      chunk_bytes):
                return Response('Checksum Mismatch.', status=460)

        # Run chunk validator
        chunk_bytes = self.validate_chunk(upload_offset, chunk_bytes)

        # Check for data
        if not chunk_bytes:
            return Response(
                'No data. Make sure "validate_chunk" returns data.',
                status=status.HTTP_400_BAD_REQUEST)

        # Write file
        chunk_size = int(request.META.get('CONTENT_LENGTH', 102400))
        try:
            upload.write_data(chunk_bytes, chunk_size)
        except Exception as e:
            return Response(str(e), status=status.HTTP_400_BAD_REQUEST)

        headers = {
            'Upload-Offset': upload.upload_offset,
        }

        response_data = None

        if upload.upload_length == upload.upload_offset:
            # PATCHED: re-send request to our native upload() method
            from apps.project.api.v1 import UploadSessionViewSet

            file = UploadedFile(file=open(upload.temporary_file_path, 'rb'),
                                name=upload.filename,
                                size=upload.upload_length)
            request._files = MultiValueDict()
            request._files['file'] = file

            try:
                directory_path = os.path.dirname(
                    json.loads(upload.upload_metadata)['relativePath'])
            except (KeyError, TypeError, json.JSONDecodeError):
                directory_path = None

            response = UploadSessionViewSet(request=request,
                                            format_kwarg=upload_session_id,
                                            action='upload',
                                            kwargs={
                                                'pk': upload_session_id
                                            }).upload(
                                                request=request,
                                                pk=upload_session_id,
                                                review_file=False,
                                                directory_path=directory_path)
            if response.status_code != 200:
                return response
            response_data = response.data

            # Trigger signal
            signals.received.send(sender=upload.__class__, instance=upload)

        # Add upload expiry to headers
        add_expiry_header(upload, headers)

        return Response(data=response_data,
                        headers=headers,
                        status=status.HTTP_204_NO_CONTENT)