Exemple #1
0
    def test_multivaluedict(self):
        """MultiValueDict basics: item access returns the last value,
        getlist returns every value, and missing keys raise
        MultiValueDictKeyError."""
        d = MultiValueDict({'name': ['Adrian', 'Simon'],
                            'position': ['Developer']})

        self.assertEqual(d['name'], 'Simon')
        self.assertEqual(d.get('name'), 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
        # Sort before comparing: dict iteration order is an implementation
        # detail, so the raw order of iteritems()/iterlists() is not stable.
        self.assertEqual(sorted(six.iteritems(d)),
                         [('name', 'Simon'), ('position', 'Developer')])

        self.assertEqual(sorted(six.iterlists(d)),
                         [('name', ['Adrian', 'Simon']),
                          ('position', ['Developer'])])

        # The dict repr embedded in the exception message depends on dict
        # ordering, so only match the stable prefix of the message.
        six.assertRaisesRegex(self, MultiValueDictKeyError,
                              r'"Key \'lastname\' not found in <MultiValueDict',
                              d.__getitem__, 'lastname')

        self.assertEqual(d.get('lastname'), None)
        self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(d.getlist('lastname'), [])
        self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
                         ['Adrian', 'Simon'])

        d.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
        self.assertEqual(sorted(six.itervalues(d)),
                         ['Developer', 'Simon', 'Willison'])
Exemple #2
0
 def status_update(self):
     """Email each user a digest of their unsent TagNotifications, grouped
     by tag, and mark the notifications as sent when delivery succeeds."""
     self.debug('{0} running'.format(SCHEDULE_DESC))
     notifications = TagNotification.objects.filter(sent=False)
     notifications = notifications.select_related().order_by('tag', 'entry')
     self.info('found {0} notifications'.format(notifications.count()))
     # Bucket the notifications per recipient email address.
     users = {}
     for notification in notifications:
         users.setdefault(notification.user.email, []).append(notification)
     # Distinct loop variable: the Py2 original rebound `notifications` here.
     for email, user_notifications in users.items():
         tags = MultiValueDict()
         for notification in user_notifications:
             tags.appendlist(notification.tag, notification)
         body = render_to_string('tree/digest.txt', {'tags': tags})
         try:
             send_mail(subject='Survey Response Report', message=body,
                       recipient_list=[email],
                       from_email='*****@*****.**',
                       fail_silently=False)
             sent = True
         except smtplib.SMTPException as e:  # Py3-compatible except syntax
             self.exception(e)
             sent = False
         if sent:
             for notification in user_notifications:
                 notification.sent = True
                 notification.date_sent = datetime.datetime.now()
                 notification.save()
             self.info('Sent report to %s' % email)
 def get_query_string(self, new_params=None, remove=None):
     """Build a '?'-prefixed query string from self.params.

     Keys are dropped when they equal an entry of ``remove`` or start with
     an entry ending in '__' (filter-style prefixes).  ``new_params``
     entries are then applied; a value of None deletes that key.
     """
     if new_params is None:
         new_params = MultiValueDict()
     if remove is None:
         remove = []
     p = self.params.copy()
     for r in remove:
         # Iterate a snapshot: deleting from p while iterating its keys()
         # view raises RuntimeError on Python 3.
         for k in list(p.keys()):
             if (k.startswith(r) and r.endswith('__')) or k == r:
                 del p[k]
     if isinstance(new_params, MultiValueDict):
         new_params_items = new_params.lists()
         setter = p.setlist          # keep every value for multi-valued keys
     else:
         new_params_items = new_params.items()
         setter = p.__setitem__
     for k, v in new_params_items:
         if k in p and v is None:
             del p[k]
         elif v is not None:
             setter(k, v)
     query_string_blocks = []
     for k, l in p.lists():
         query_string_blocks.append('&'.join('%s=%s' % (k, v) for v in l))
     return mark_safe('?' + '&'.join(query_string_blocks).replace(' ', '%20'))
    def test_value_from_datadict_with_missing_choice_rows(self):
        """Testing ConditionsWidget.value_from_datadict with missing choice
        rows
        """
        class MyChoice(BaseConditionIntegerChoice):
            choice_id = 'my-choice'

        choices = ConditionChoices([MyChoice])
        field = ConditionsField(choices=choices)

        # last_id is 5 but rows 0-4 are absent from the POSTed data; only
        # row [5] is present.
        data = MultiValueDict('')
        data.update({
            'my_conditions_mode': 'any',
            'my_conditions_last_id': '5',
            'my_conditions_choice[5]': 'my-choice',
            'my_conditions_operator[5]': 'is-not',
            'my_conditions_value[5]': 'my-value',
        })

        # The widget is expected to skip the missing rows and return only
        # the condition parsed from row 5.
        self.assertEqual(
            field.widget.value_from_datadict(data, MultiValueDict(''),
                                             'my_conditions'),
            {
                'mode': 'any',
                'conditions': [
                    {
                        'choice': 'my-choice',
                        'op': 'is-not',
                        'value': 'my-value',
                    },
                ],
            })
 def get_files(self):
     """Collect the uploaded files recorded in the wizard's session data.

     Returns a MultiValueDict mapping '<step>-file' to file objects
     re-opened from file_storage using each entry's stored path.
     """
     files = MultiValueDict({})
     step_items = self.request.session[self.prefix][self.step_data_session_key].items()
     for step, step_data in step_items:
         # dict.has_key() was removed in Python 3; use the `in` operator.
         if 'files' in step_data:
             # `stored` renamed from `file`, which shadowed the builtin.
             for stored in step_data.getlist('files'):
                 files.appendlist(step + '-file',
                                  self.file_storage.open(stored.get('path')))
     return files
Exemple #6
0
 def __init__(self, handler=None, obj=None, prefix='', formdata=None, **kwargs):
     """Build a WTForms form from a Tornado request handler.

     When ``handler`` is given, the form data is a MultiValueDict built
     from the handler's request arguments (decoded via
     handler.get_arguments) and the translator comes from handler.locale.
     NOTE(review): this overwrites any caller-supplied ``formdata``.
     """
     if handler:
         self.translate_obj = TornadoWTFTranslator(handler.locale)
         formdata = MultiValueDict()
         for name in handler.request.arguments.keys():
             formdata.setlist(name, handler.get_arguments(name))
     Form.__init__(self, formdata, obj=obj, prefix=prefix, **kwargs)
    def test_multivaluedict(self):
        """MultiValueDict basics: item access returns the last value,
        getlist returns every value, and missing keys raise
        MultiValueDictKeyError."""
        d = MultiValueDict({"name": ["Adrian", "Simon"], "position": ["Developer"]})

        self.assertEqual(d["name"], "Simon")
        self.assertEqual(d.get("name"), "Simon")
        self.assertEqual(d.getlist("name"), ["Adrian", "Simon"])
        # sorted() because dict iteration order is not guaranteed here.
        self.assertEqual(sorted(list(six.iteritems(d))), [("name", "Simon"), ("position", "Developer")])

        self.assertEqual(sorted(list(six.iterlists(d))), [("name", ["Adrian", "Simon"]), ("position", ["Developer"])])

        # MultiValueDictKeyError: "Key 'lastname' not found in
        # <MultiValueDict: {'position': ['Developer'],
        #                   'name': ['Adrian', 'Simon']}>"
        # Only the stable prefix is matched; the dict repr's order varies.
        six.assertRaisesRegex(
            self, MultiValueDictKeyError, r'"Key \'lastname\' not found in <MultiValueDict', d.__getitem__, "lastname"
        )

        self.assertEqual(d.get("lastname"), None)
        self.assertEqual(d.get("lastname", "nonexistent"), "nonexistent")
        self.assertEqual(d.getlist("lastname"), [])
        self.assertEqual(d.getlist("doesnotexist", ["Adrian", "Simon"]), ["Adrian", "Simon"])

        d.setlist("lastname", ["Holovaty", "Willison"])
        self.assertEqual(d.getlist("lastname"), ["Holovaty", "Willison"])
        self.assertEqual(sorted(list(six.itervalues(d))), ["Developer", "Simon", "Willison"])
Exemple #8
0
    def test_multivaluedict(self):
        """MultiValueDict basics: item access returns the last value,
        getlist returns every value, and a missing key raises
        MultiValueDictKeyError mentioning that key."""
        d = MultiValueDict({'name': ['Adrian', 'Simon'],
                            'position': ['Developer']})

        self.assertEqual(d['name'], 'Simon')
        self.assertEqual(d.get('name'), 'Simon')
        self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
        # sorted() because dict iteration order is not guaranteed here.
        self.assertEqual(sorted(list(six.iteritems(d))),
                          [('name', 'Simon'), ('position', 'Developer')])

        self.assertEqual(sorted(list(six.iterlists(d))),
                          [('name', ['Adrian', 'Simon']),
                           ('position', ['Developer'])])

        # Match only the key name; the full message embeds an order-
        # dependent dict repr.
        six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
            d.__getitem__, 'lastname')

        self.assertEqual(d.get('lastname'), None)
        self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(d.getlist('lastname'), [])
        self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
                         ['Adrian', 'Simon'])

        d.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
        self.assertEqual(sorted(list(six.itervalues(d))),
                         ['Developer', 'Simon', 'Willison'])
 def data(self):
     """Merge the data of every subform into a single MultiValueDict.

     setlist replaces the whole value list per key, so a later subform
     wins over an earlier one for duplicate keys.
     """
     merged = MultiValueDict()
     for subform in self.forms.values():
         source = subform.data
         for field_name in source:
             merged.setlist(field_name, source.getlist(field_name))
     return merged
Exemple #10
0
    def get_form_kwargs(self):
        """
        Get the kwargs to pass to the form for this script. By default, returns the task
        arguments.

        - 'data' list entries converted into a querydict.
        - 'files' list entries converted to File objects
        """
        kwargs = self.task.arguments.copy()  # don't modify self.task.arguments['data']
        if 'data' in kwargs:
            d = QueryDict('').copy()  # mutable copy of an empty QueryDict
            for k, v in kwargs['data'].items():  # iteritems() is Py2-only
                if isinstance(v, list):
                    d.setlist(k, v)
                else:
                    d[k] = v
            kwargs['data'] = d

        # Convert file dictionaries (as supplied by get_temporary_file_dict) to
        # SimpleUploadedFile objects which Django understands.
        if 'files' in kwargs:
            files = MultiValueDict(kwargs['files'])
            # lists() yields (key, full-value-list) pairs; this replaces the
            # Py2-only dict.viewvalues(), which on a MultiValueDict also
            # exposed the raw lists (unlike MultiValueDict.values()).
            for _key, filedict_list in files.lists():
                for i, fdict in enumerate(filedict_list):
                    if isinstance(fdict, dict):
                        fdict = dict(fdict)
                        # Close the handle promptly instead of leaking it.
                        with open(fdict["path"], "rb") as fh:
                            fdict["content"] = fh.read()
                        filedict_list[i] = SimpleUploadedFile.from_dict(fdict)
            kwargs['files'] = files

        return kwargs
 def files(self):
     """Merge the uploaded files of every subform into one MultiValueDict.

     setlist replaces the whole list per key, so later subforms win on
     duplicate keys.
     """
     merged = MultiValueDict()
     for subform in self.forms.values():
         source = subform.files
         for field_name in source:
             merged.setlist(field_name, source.getlist(field_name))
     return merged
Exemple #12
0
 def test_encode_multipart_data_multiple_params(self):
     # Sequences of parameters and files can be passed to
     # encode_multipart_data() so that multiple parameters/files with the
     # same name can be provided.
     params_in = [
         ("one", "ABC"),
         ("one", "XYZ"),
         ("two", "DEF"),
         ("two", "UVW"),
         ]
     files_in = [
         ("f-one", BytesIO(urandom(32))),
         ("f-two", BytesIO(urandom(32))),
         ]
     body, headers = encode_multipart_data(params_in, files_in)
     # The encoder must report the exact body length and a multipart type.
     self.assertEqual("%s" % len(body), headers["Content-Length"])
     self.assertThat(
         headers["Content-Type"],
         StartsWith("multipart/form-data; boundary="))
     # Round-trip through Django's multipart code.
     params_out, files_out = (
         parse_headers_and_body_with_django(headers, body))
     # Every duplicate-named parameter must survive the round trip.
     params_out_expected = MultiValueDict()
     for name, value in params_in:
         params_out_expected.appendlist(name, value)
     self.assertEqual(
         params_out_expected, params_out,
         ahem_django_ahem)
     self.assertSetEqual({"f-one", "f-two"}, set(files_out))
     # Compare file payloads byte-for-byte (inputs were random bytes).
     files_expected = {name: buf.getvalue() for name, buf in files_in}
     files_observed = {name: buf.read() for name, buf in files_out.items()}
     self.assertEqual(
         files_expected, files_observed,
         ahem_django_ahem)
def parse_html_dict(dictionary, prefix=''):
    """
    Used to support dictionary values in HTML forms.

    {
        'profile.username': '******',
        'profile.email': '*****@*****.**',
    }
        -->
    {
        'profile': {
            'username': '******',
            'email': '*****@*****.**'
        }
    }
    """
    ret = MultiValueDict()
    pattern = re.compile(r'^%s\.(.+)$' % re.escape(prefix))
    for field in dictionary:
        match = pattern.match(field)
        if match:
            # Keep every value for the field, not just the last one.
            ret.setlist(match.group(1), dictionary.getlist(field))
    return ret
Exemple #14
0
def parse_file_upload(header_dict, post_data):
    """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
    # NOTE(review): Python 2-era code — the `email.Message` module path and
    # str-based body handling changed in Python 3; verify before reuse.
    import email, email.Message
    from cgi import parse_header
    # Rebuild a raw MIME message from the request headers plus the body so
    # the stdlib email parser can split the multipart payload for us.
    raw_message = '\r\n'.join(['%s:%s' % pair for pair in header_dict.items()])
    raw_message += '\r\n\r\n' + post_data
    msg = email.message_from_string(raw_message)
    POST = QueryDict('', mutable=True)
    FILES = MultiValueDict()
    for submessage in msg.get_payload():
        if submessage and isinstance(submessage, email.Message.Message):
            name_dict = parse_header(submessage['Content-Disposition'])[1]
            # name_dict is something like {'name': 'file', 'filename': 'test.txt'} for file uploads
            # or {'name': 'blah'} for POST fields
            # We assume all uploaded files have a 'filename' set.
            if 'filename' in name_dict:
                assert type([]) != type(submessage.get_payload()), "Nested MIME messages are not supported"
                if not name_dict['filename'].strip():
                    continue
                # IE submits the full path, so trim everything but the basename.
                # (We can't use os.path.basename because that uses the server's
                # directory separator, which may not be the same as the
                # client's one.)
                filename = name_dict['filename'][name_dict['filename'].rfind("\\")+1:]
                FILES.appendlist(name_dict['name'], FileDict({
                    'filename': filename,
                    'content-type': 'Content-Type' in submessage and submessage['Content-Type'] or None,
                    'content': submessage.get_payload(),
                }))
            else:
                POST.appendlist(name_dict['name'], submessage.get_payload())
    return POST, FILES
    def render(self, context):
        """Render the node's URL with a recomputed GET query string.

        Requires ``request`` in the template context so the current GET
        parameters can be merged with the tag's extra parameters before
        process_query applies keep/exclude/add/remove.  With an ``as var``
        clause the URL is stored in the context and '' is returned.
        """
        url, extra_params = self.get_url(context)

        if 'request' not in context:
            raise ValueError('`request` needed in context for GET query processing')

        # lists()/items() replace the Python 2-only iterlists()/iteritems().
        params = MultiValueDict(context['request'].GET.lists())
        params.update(MultiValueDict(extra_params))

        query = process_query(
            params,
            self.keep and [v.resolve(context) for v in self.keep],
            self.exclude and [v.resolve(context) for v in self.exclude],
            self.add and {k: v.resolve(context) for k, v in self.add.items()},
            self.remove and {k: v.resolve(context) for k, v in self.remove.items()},
        )
        if query:
            url += '?' + query

        url = escape(url)

        if self._asvar:
            context[self._asvar] = url
            return ''
        return url
Exemple #16
0
    def handle_files(self, reqfiles):
        """
        Moves all temporary files into persistent location. Allows storing
        files in the session.

        Each uploaded file is copied into a temp file whose random basename
        (minus the '.upload' suffix) serves as its identifier.  Returns a
        MultiValueDict mapping field names to lists of RefUploadedFile
        wrappers around the still-open destination files.
        """
        ret = MultiValueDict()
        for (name, files) in reqfiles.iterlists():
            ls = []
            # `upload` renamed from `file`, which shadowed the builtin.
            for upload in files:
                # [FIXME: exception can be raised, we don't want to simply lose it
                # (with the file) so allow Http500 and logging of this error]
                temp_dir = getattr(settings, 'WIZARD_UPLOAD_TEMP_DIR', None)
                if temp_dir:
                    (dst, dstpath) = tempfile.mkstemp(suffix='.upload', dir=temp_dir)
                else:
                    (dst, dstpath) = tempfile.mkstemp(suffix='.upload')

                dstfile = open(dstpath, "w+b")
                for chunk in upload.chunks():
                    dstfile.write(chunk)

                dstfile.flush()  # Don't close the file, django expects an uploaded file to be open!
                os.close(dst)

                # `file_hash` renamed from `hash`, which shadowed the builtin.
                file_hash = os.path.basename(dstpath)[:-len('.upload')]

                ls.append(RefUploadedFile(file_hash, dstfile, upload.name,
                                          upload.content_type, upload.size,
                                          upload.charset))
            ret.setlist(name, ls)

        return ret
Exemple #17
0
    def test_dict_translation(self):
        """dict() flattens a MultiValueDict to one value per key."""
        mvd = MultiValueDict({"devs": ["Bob", "Joe"], "pm": ["Rory"]})
        d = mvd.dict()
        # Same key set...
        self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
        # ...and each flattened value matches MultiValueDict item access.
        for key in six.iterkeys(mvd):
            self.assertEqual(d[key], mvd[key])

        self.assertEqual({}, MultiValueDict().dict())
Exemple #18
0
 def __init__(self, request, kargs, view):
     """Build the form's data by merging request.GET, request.POST and the
     view's URL kwargs into one MultiValueDict."""
     self.request = request
     self.view = view
     self.view_kargs = kargs
     data = MultiValueDict()
     # Merge all three sources in order; duplicate keys accumulate values
     # from each source rather than overwriting (MultiValueDict.update
     # appends — see the update() example elsewhere in this file).
     for x in request.GET.copy(), request.POST.copy(), kargs:
         data.update(x)
     super(PreRunForm, self).__init__(data=data)
Exemple #19
0
 def resolve_params(self, text):
     """Parse 'key: value' lines of *text* into a MultiValueDict.

     Lines without a colon are ignored; keys may repeat, every value is
     kept.
     """
     params = MultiValueDict()
     for raw_line in text.split('\n'):
         key, sep, value = raw_line.partition(':')
         if sep:
             params.appendlist(key.strip(), value.strip())
     return params
Exemple #20
0
def _do_export(survey, subject_type, pk):
    """Gather the facts recorded for one survey subject into a
    MultiValueDict keyed by desired-fact code."""
    fact_qs = (models.Fact.objects
               .filter(survey=survey, content_type=subject_type, object_id=pk)
               .select_related('desired_fact'))

    exported = MultiValueDict()
    for fact in fact_qs:
        exported.appendlist(fact.desired_fact.code, fact.data)
    return exported
def unserialize(serialized_object):
    """Rebuild a MultiValueDict from its JSON-serialized form.

    The payload must be a JSON object whose values are the full value
    lists; each list is restored with setlist so no values are lost.

    Raises TypeError if the argument is not a string.
    """
    if not isinstance(serialized_object, str):
        # `unicode` no longer exists on Python 3; `str` covers all text.
        raise TypeError("The parameter given must be a string")
    unserialized = json.loads(serialized_object)
    out = MultiValueDict()
    # Naively converting a dict() to MultiValueDict() loses information
    for key, value in unserialized.items():
        out.setlist(key, value)
    return out
Exemple #22
0
 def __init__(self, query_string, mutable=False, encoding=None):
     """Parse a raw query string into this QueryDict.

     Blank values are kept (keep_blank_values=True), keys and values are
     coerced to unicode with ``encoding`` (defaulting to
     settings.DEFAULT_CHARSET), and the dict is immutable unless
     ``mutable`` is true.
     """
     MultiValueDict.__init__(self)
     if not encoding:
         encoding = settings.DEFAULT_CHARSET
     self.encoding = encoding
     for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
         self.appendlist(force_unicode(key, encoding, errors='replace'),
                         force_unicode(value, encoding, errors='replace'))
     self._mutable = mutable
Exemple #23
0
 def update(self, other_dict):
     """Extend this dict with ``other_dict``, coercing keys and values to
     unicode with this dict's encoding.

     Sources exposing lists() contribute every value per key; plain
     mappings contribute a single value.  Requires the dict to be mutable.
     """
     self._assert_mutable()
     f = lambda s: str_to_unicode(s, self.encoding)
     if hasattr(other_dict, 'lists'):
         for key, valuelist in other_dict.lists():
             for value in valuelist:
                 # One-pair update per value so each is appended in turn.
                 MultiValueDict.update(self, {f(key): f(value)})
     else:
         d = dict([(f(k), f(v)) for k, v in other_dict.items()])
         MultiValueDict.update(self, d)
    def _build_sub_query(self, search_node):
        """Recursively flatten a SearchNode tree into a MultiValueDict of
        (field, term) pairs."""
        terms = MultiValueDict()

        for child in search_node.children:
            if not isinstance(child, SearchNode):
                # Leaf: a (field, value) pair.
                terms.appendlist(child[0], child[1])
            else:
                # Branch: merge the subtree's terms.
                terms.update(self._build_sub_query(child))

        return terms
Exemple #25
0
 def __init__(self, request=None, obj=None, prefix='', formdata=None, **kwargs):
   """Build form data from a Tornado-style request object or a plain dict
   of argument lists, sanitizing every value before handing off to
   WTForms.

   NOTE(review): a caller-supplied ``formdata`` is discarded whenever
   ``request`` is given.
   """
   if request:
     if isinstance(request, dict):
       arguments = request
     else:
       arguments = request.arguments
     formdata = MultiValueDict()
     for name, values in arguments.items():
       formdata.setlist(name, [sanitize_parameter_value(v) for v in values])
   Form.__init__(self, formdata, obj=obj, prefix=prefix, **kwargs)
Exemple #26
0
def jquery_to_dict(values):
    """Convert a jQuery serializeArray()-style sequence of
    {'name': ..., 'value': ...} dicts into a MultiValueDict.

    Anything that is not a list/tuple/set, and any entry that is not a
    dict with both keys, is silently ignored.
    """
    result = MultiValueDict()
    if isinstance(values, (list, tuple, set)):
        for entry in values:
            if isinstance(entry, dict) and 'name' in entry and 'value' in entry:
                result.appendlist(entry['name'], entry['value'])
    return result
Exemple #27
0
    def _get_request_for_batch(ctx, resource, data):
        """Execute one GET sub-request of a batch and package its result.

        Returns a dict with an HTTP-style status (always 200 here), an
        etag (sha1 of the content) and the content itself.
        """
        resource_result = {}
        # Copy the batch entry's parameters into a MultiValueDict to mimic
        # GET data for process_get_request.
        get_data = MultiValueDict()
        get_data.update(data)
        content_dict = process_get_request(ctx, resource, get_data)
        resource_result['status'] = 200
        resource_result['etag'] = get_sha1(ctx, content_dict)
        resource_result['data'] = content_dict

        return resource_result
Exemple #28
0
class MakeMonthArrayNode(Node):
    """Template node that builds a per-week calendar array for a month.

    render() stores the computed array in context['month_array'] and
    outputs nothing itself.
    """

    def __init__(self):
        self.year = None
        self.month = None
        self.data = MultiValueDict()  # day number -> list of objects
        self._weekday = None    # Weekday (Monday==0) for day 1 in this month.
        self._n_days = None     # Number of days in month.

    def _n_added_days(self):
        """Number of added days at the end of the month to have a "full week"."""
        return (7 - (self._weekday + self._n_days) % 7) % 7

    def __list2array__(self, l, ncols=7):
        """Splits list l in lists of len()=cols. l must be "squareable".
        """
        # Floor division: on Python 3, len(l) / ncols is a float and
        # range() rejects it.
        return [l[row * ncols:(row + 1) * ncols]
                for row in range(len(l) // ncols)]

    def squared(self):
        """Inserts keys and values in an array of days (month's calendar).

        Returns a list of weeks.
        One week, one list of days.
        One day, one list with [ <day-number> , [<data for this day>] ]
        """
        # Insert [None] in remaining days. IN PLACE.
        # (dict.has_key() was removed in Python 3; use `in`.)
        for i in range(1, self._n_days + 1):
            if i not in self.data:
                self.data[i] = None

        # zerofill out-of-month days
        before = self._weekday * [[0, [None]]]
        after = self._n_added_days() * [[0, [None]]]
        # cat three lists and split per weeks
        return self.__list2array__(before + self.data.lists() + after)

    def render(self, context):
        try:
            self.year = context['month'].year
            self.month = context['month'].month
            self._weekday, self._n_days = monthrange(self.year, self.month)

            for o in context['object_list']:
                self.data.appendlist(o.startdate.day, o)

            context['month_array'] = self.squared()
        except Exception:
            # Deliberately best-effort: any failure leaves the context
            # untouched and the node renders nothing.
            pass
        return ''
def update_multivaluedict(old, new):
    """Use instead of MultiValueDict.update()

    MultiValueDict.update() appends instead of overwrites, so a helper function
    is needed if overwrite is what you want.
    """
    # Trivial cases: either side empty/None -> return the other unchanged.
    if not old:
        return new
    if not new:
        return old
    merged = MultiValueDict()
    remaining = new.copy()
    for key, old_values in old.lists():
        if key not in remaining:
            merged.setlist(key, old_values)
        else:
            new_values = remaining.getlist(key)
            merged.setlist(key, new_values)
            if new_values == old_values:
                remaining.pop(key)
    # Whatever is left in `remaining` overwrites (or adds) its key.
    for key, values in remaining.lists():
        merged.setlist(key, values)
    return merged
Exemple #30
0
    def test_dict_translation(self):
        """dict() flattens a MultiValueDict to one value per key."""
        mvd = MultiValueDict({
            'devs': ['Bob', 'Joe'],
            'pm': ['Rory'],
        })
        d = mvd.dict()
        # Same key set...
        self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
        # ...and each flattened value matches MultiValueDict item access.
        for key in six.iterkeys(mvd):
            self.assertEqual(d[key], mvd[key])

        self.assertEqual({}, MultiValueDict().dict())
Exemple #31
0
 def popitem(self):
     """Remove and return a (key, value) pair; only allowed while mutable."""
     self._assert_mutable()
     return MultiValueDict.popitem(self)
Exemple #32
0
 def _mark_post_parse_error(self):
     # Body parsing failed: expose empty POST/FILES and record the failure
     # so later accessors can tell the body was unparseable.
     self._post = QueryDict('')
     self._files = MultiValueDict()
     self._post_parse_error = True
 def test_getlist_none_empty_values(self):
     """getlist returns stored None/empty-list values as-is, with no
     default substituted."""
     x = MultiValueDict({'a': None, 'b': []})
     self.assertIsNone(x.getlist('a'))
     self.assertEqual(x.getlist('b'), [])
Exemple #34
0
 def test_image_create_form(self):
     """ImageCreateForm.clean() runs with an album pk and one file entry."""
     data = {'apk': self.album.pk}
     image_path = os.path.join(settings.MEDIA_ROOT, self.image_filename)
     # Files are passed as a MultiValueDict, mirroring request.FILES.
     image_files = MultiValueDict({'data': [image_path]})
     form = ImageCreateForm(data, files=image_files)
     form.clean()
Exemple #35
0
 def setlistdefault(self, key, default_list=()):
     """Like dict.setdefault, but for lists of values.

     Stores ``default_list`` under ``key`` when the key is absent, then
     returns the stored list.  Requires the dict to be mutable.
     """
     self._assert_mutable()
     if key not in self:
         self.setlist(key, default_list)
     return MultiValueDict.getlist(self, key)
Exemple #36
0
 def appendlist(self, key, value):
     """Append ``value`` to the list for ``key``, coercing both to unicode
     with this dict's encoding first.  Requires the dict to be mutable."""
     self._assert_mutable()
     key = str_to_unicode(key, self.encoding)
     value = str_to_unicode(value, self.encoding)
     MultiValueDict.appendlist(self, key, value)
Exemple #37
0
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.
        """
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None
        # Whether a file upload is finished.
        uploaded_file = True

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None
                    uploaded_file = True

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                            and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                            num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                            and num_bytes_read >
                            settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig(
                            'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding,
                                              errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_str(file_name,
                                              encoding,
                                              errors='replace')
                        file_name = self.sanitize_file_name(file_name)
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    uploaded_file = False
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name,
                                    file_name,
                                    content_type,
                                    content_length,
                                    charset,
                                    content_type_extra,
                                )
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 -
                                                                   remaining)
                                    stripped_chunk += b"".join(
                                        over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            if not uploaded_file:
                for handler in handlers:
                    handler.upload_interrupted()
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        self._post._mutable = False
        return self._post, self._files
    def test_multivaluedict(self):
        """Exercise the core MultiValueDict API: item access yields the
        last value, getlist() yields every value, and a missing key
        raises MultiValueDictKeyError."""
        md = MultiValueDict({
            'name': ['Adrian', 'Simon'],
            'position': ['Developer']
        })

        # __getitem__ and get() expose only the last value for a key.
        self.assertEqual(md['name'], 'Simon')
        self.assertEqual(md.get('name'), 'Simon')
        # getlist() exposes the full value list.
        self.assertEqual(md.getlist('name'), ['Adrian', 'Simon'])

        # Sort the iteration results so the assertions don't depend on
        # dict ordering.
        self.assertEqual(sorted(six.iteritems(md)),
                         [('name', 'Simon'), ('position', 'Developer')])
        self.assertEqual(sorted(six.iterlists(md)),
                         [('name', ['Adrian', 'Simon']),
                          ('position', ['Developer'])])

        # Missing keys raise an error mentioning the key name.
        six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
                              md.__getitem__, 'lastname')

        # get()/getlist() fall back gracefully for missing keys.
        self.assertIsNone(md.get('lastname'))
        self.assertEqual(md.get('lastname', 'nonexistent'), 'nonexistent')
        self.assertEqual(md.getlist('lastname'), [])
        self.assertEqual(md.getlist('doesnotexist', ['Adrian', 'Simon']),
                         ['Adrian', 'Simon'])

        # setlist() replaces the whole list stored under a key.
        md.setlist('lastname', ['Holovaty', 'Willison'])
        self.assertEqual(md.getlist('lastname'), ['Holovaty', 'Willison'])
        self.assertEqual(sorted(six.itervalues(md)),
                         ['Developer', 'Simon', 'Willison'])
Example #39
0
 def clear(self):
     """Remove all entries after verifying this dict is mutable."""
     # Presumably raises when the instance is immutable; _assert_mutable
     # is defined elsewhere in the class -- verify its exact exception.
     self._assert_mutable()
     # Delegate explicitly to the MultiValueDict implementation.
     MultiValueDict.clear(self)
Example #40
0
 def _populate(self):
     """Build the per-language reverse-lookup, namespace, and app tables
     for this resolver, guarding against recursive invocation."""
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     if getattr(self._local, 'populating', False):
         return
     try:
         self._local.populating = True
         lookups = MultiValueDict()
         namespaces = {}
         apps = {}
         language_code = get_language()
         # Iterate in reverse order (presumably so earlier patterns end
         # up with precedence in the appendlist lookups -- verify).
         for url_pattern in reversed(self.url_patterns):
             p_pattern = url_pattern.pattern.regex.pattern
             # Strip the leading '^' so prefixes concatenate cleanly.
             if p_pattern.startswith('^'):
                 p_pattern = p_pattern[1:]
             if isinstance(url_pattern, URLPattern):
                 self._callback_strs.add(url_pattern.lookup_str)
                 bits = normalize(url_pattern.pattern.regex.pattern)
                 # Register the pattern under its callback, and under its
                 # name when one was given.
                 lookups.appendlist(
                     url_pattern.callback,
                     (bits, p_pattern, url_pattern.default_args,
                      url_pattern.pattern.converters))
                 if url_pattern.name is not None:
                     lookups.appendlist(
                         url_pattern.name,
                         (bits, p_pattern, url_pattern.default_args,
                          url_pattern.pattern.converters))
             else:  # url_pattern is a URLResolver.
                 url_pattern._populate()
                 if url_pattern.app_name:
                     apps.setdefault(url_pattern.app_name,
                                     []).append(url_pattern.namespace)
                     namespaces[url_pattern.namespace] = (p_pattern,
                                                          url_pattern)
                 else:
                     # Un-namespaced resolver: merge its reverse entries
                     # into ours, prefixing with our own pattern.
                     for name in url_pattern.reverse_dict:
                         for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(
                                 name):
                             new_matches = normalize(p_pattern + pat)
                             lookups.appendlist(
                                 name, (new_matches, p_pattern + pat, {
                                     **defaults,
                                     **url_pattern.default_kwargs
                                 }, {
                                     **self.pattern.converters,
                                     **url_pattern.pattern.converters,
                                     **converters
                                 }))
                     for namespace, (
                             prefix, sub_pattern
                     ) in url_pattern.namespace_dict.items():
                         current_converters = url_pattern.pattern.converters
                         sub_pattern.pattern.converters.update(
                             current_converters)
                         namespaces[namespace] = (p_pattern + prefix,
                                                  sub_pattern)
                     for app_name, namespace_list in url_pattern.app_dict.items(
                     ):
                         apps.setdefault(app_name,
                                         []).extend(namespace_list)
                 self._callback_strs.update(url_pattern._callback_strs)
         # Publish the finished tables for the active language.
         self._namespace_dict[language_code] = namespaces
         self._app_dict[language_code] = apps
         self._reverse_dict[language_code] = lookups
         self._populated = True
     finally:
         self._local.populating = False
 def test_appendlist(self):
     """appendlist() accumulates values under one key in insertion order."""
     md = MultiValueDict()
     for value in ('Adrian', 'Simon'):
         md.appendlist('name', value)
     self.assertEqual(md.getlist('name'), ['Adrian', 'Simon'])
Example #42
0
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data, self._meta,
                                              self._content_length,
                                              self._boundary, encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # Malformed part with no usable field name: skip it.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            # Undecodable payload: fall back to raw bytes.
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(
                        field_name, force_text(data,
                                               encoding,
                                               errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name,
                                           encoding,
                                           errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    # Per-handler byte counters for this file.
                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always read base64 streams by multiple of 4
                                over_bytes = len(chunk) % 4
                                if over_bytes:
                                    over_chunk = field_stream.read(4 -
                                                                   over_bytes)
                                    chunk += over_chunk

                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError,
                                                MultiPartParserError(msg),
                                                sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            if not e.connection_reset:
                # Drain the rest of the body so the connection stays usable.
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                # A truthy return short-circuits the remaining handlers.
                break

        return self._post, self._files
Example #43
0
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.

        Raises MultiPartParserError for a non-multipart Content-Type, a
        missing/invalid boundary, or a negative Content-Length.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        content_type = META.get('HTTP_CONTENT_TYPE',
                                META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' %
                                       content_type)

        # Parse the header to get the boundary to split the parts.
        # ctypes is unused; opts carries the 'boundary' parameter.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' %
                                       boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(
                META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" %
                                       content_length)

        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [
            x.chunk_size for x in upload_handlers if x.chunk_size
        ]
        self._chunk_size = min([2**31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data, self._meta,
                                              self._content_length,
                                              self._boundary, encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # Malformed part with no usable field name: skip it.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            # Undecodable payload: fall back to raw bytes.
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(
                        field_name, force_text(data,
                                               encoding,
                                               errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name,
                                           encoding,
                                           errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    # Per-handler byte counters for this file.
                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always read base64 streams by multiple of 4
                                over_bytes = len(chunk) % 4
                                if over_bytes:
                                    over_chunk = field_stream.read(4 -
                                                                   over_bytes)
                                    chunk += over_chunk

                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError,
                                                MultiPartParserError(msg),
                                                sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            if not e.connection_reset:
                # Drain the rest of the body so the connection stays usable.
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                # A truthy return short-circuits the remaining handlers.
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name,
                               self._encoding,
                               errors='replace'), file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        # IE sends the client's full path; keep only the final component.
        return filename and filename[filename.rfind("\\") + 1:].strip()
Example #44
0
class HttpRequest(object):
    """A basic HTTP request."""

    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []

    def __init__(self):
        """Initialize empty request state.

        WARNING: the `WSGIRequest` subclass doesn't call `super`, so any
        attribute assigned here must also be assigned in
        `WSGIRequest.__init__()`.
        """
        # Form/query containers (mutable until parsing locks them down).
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.FILES = MultiValueDict()
        # Plain mapping containers.
        self.COOKIES = {}
        self.META = {}
        # Request-line and routing state.
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        # Set to True when POST parsing fails.
        self._post_parse_error = False

    def __repr__(self):
        """Return '<ClassName: METHOD path>', or just '<ClassName>' when
        the request has no method or path yet."""
        cls_name = self.__class__.__name__
        full_path = self.get_full_path()
        if self.method is None or not full_path:
            return force_str('<%s>' % cls_name)
        return force_str(
            '<%s: %s %r>' % (cls_name, self.method, force_str(full_path)))

    def get_host(self):
        """Return the HTTP host from the headers or environ, validated
        against settings.ALLOWED_HOSTS unless DEBUG is on."""
        meta = self.META
        # Three sources, in decreasing order of preference.
        if settings.USE_X_FORWARDED_HOST and 'HTTP_X_FORWARDED_HOST' in meta:
            host = meta['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in meta:
            host = meta['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = meta['SERVER_NAME']
            server_port = str(meta['SERVER_PORT'])
            default_port = '443' if self.is_secure() else '80'
            if server_port != default_port:
                host = '%s:%s' % (host, server_port)

        # There is no hostname validation when DEBUG=True.
        if settings.DEBUG:
            return host

        domain, port = split_domain_port(host)
        if domain and validate_host(domain, settings.ALLOWED_HOSTS):
            return host
        # Build a helpful message before refusing the request.
        msg = "Invalid HTTP_HOST header: %r." % host
        if domain:
            msg += " You may need to add %r to ALLOWED_HOSTS." % domain
        else:
            msg += " The domain name provided is not valid according to RFC 1034/1035."
        raise DisallowedHost(msg)

    def get_full_path(self):
        """Return the escaped path plus any query string.

        RFC 3986 requires query string arguments to be in the ASCII
        range, so rather than crash we encode defensively.
        """
        query_string = self.META.get('QUERY_STRING', '')
        if query_string:
            return '%s?%s' % (escape_uri_path(self.path),
                              iri_to_uri(query_string))
        return escape_uri_path(self.path)

    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='',
                          max_age=None):
        """
        Return the value of a signed cookie.

        If the signature fails, the cookie has expired, or the cookie is
        missing, raise -- unless a ``default`` was provided, in which
        case return it instead.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is RAISE_ERROR:
                raise
            return default
        try:
            return signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is RAISE_ERROR:
                raise
            return default

    def build_absolute_uri(self, location=None):
        """
        Build an absolute URI from ``location`` and this request.

        With no ``location``, the URI is built on
        ``request.get_full_path()``. An already-absolute location is
        simply converted to an RFC 3987 compliant URI; a relative or
        scheme-relative one (``//example.com/``) is urljoined to a base
        URL constructed from the request variables.
        """
        if location is None:
            # Make it an absolute (but schemeless and domainless) url,
            # covering the edge case of a path that starts with '//'.
            location = '//%s' % self.get_full_path()
        parsed = urlsplit(location)
        if not (parsed.scheme and parsed.netloc):
            base_uri = '{scheme}://{host}{path}'.format(
                scheme=self.scheme, host=self.get_host(), path=self.path)
            # Joining lets ``location`` apply query strings to the base
            # path and override the host when it begins with //.
            location = urljoin(base_uri, location)
        return iri_to_uri(location)

    def _get_scheme(self):
        """
        Hook for subclasses like WSGIRequest to implement. Returns 'http' by
        default.
        """
        # Subclasses override this to inspect the transport (e.g. the
        # WSGI environ) and report 'https' when appropriate.
        return 'http'

    @property
    def scheme(self):
        """'https' when the configured SECURE_PROXY_SSL_HEADER header
        carries its expected value; otherwise whatever _get_scheme()
        reports."""
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, secure_value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header, None) == secure_value:
                return 'https'
        return self._get_scheme()

    def is_secure(self):
        # True when the effective request scheme is https.
        return self.scheme == 'https'

    def is_ajax(self):
        # Conventional header set by jQuery and most JS libraries on
        # XMLHttpRequest calls.
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'

    @property
    def encoding(self):
        # Encoding used for GET/POST data; None means use the default
        # setting (see the class-level comment on _encoding).
        return self._encoding

    @encoding.setter
    def encoding(self, val):
        """
        Set the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, remove it so it is recreated
        on the next access (and therefore decoded correctly).
        """
        self._encoding = val
        # Drop any cached parsed data so it gets re-decoded lazily.
        for cached_attr in ('_get', '_post'):
            if hasattr(self, cached_attr):
                delattr(self, cached_attr)

    def _initialize_handlers(self):
        """Build the upload-handler chain from FILE_UPLOAD_HANDLERS."""
        handlers = []
        for handler_path in settings.FILE_UPLOAD_HANDLERS:
            handlers.append(uploadhandler.load_handler(handler_path, self))
        self._upload_handlers = handlers

    @property
    def upload_handlers(self):
        # Lazily built upload-handler chain.
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers

    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError(
                "You cannot set the upload handlers after the upload has been processed."
            )
        self._upload_handlers = upload_handlers

    def parse_file_upload(self, META, post_data):
        """
        Parse a multipart request body.

        Returns a (POST QueryDict, FILES MultiValueDict) pair. The handler
        list is frozen first so it cannot be swapped mid-parse.
        """
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(
            META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()

    @property
    def body(self):
        """
        Return the raw request body as bytes, reading and caching it on
        first access. Raises RawPostDataException if the stream has already
        been consumed, and UnreadablePostError on an I/O failure.
        """
        if not hasattr(self, '_body'):
            if self._read_started:
                # The underlying stream was already (partially) read, so the
                # complete body can no longer be recovered.
                raise RawPostDataException(
                    "You cannot access body after reading from request's data stream"
                )
            try:
                self._body = self.read()
            except IOError as e:
                # Re-raise as the domain-specific error while preserving the
                # original traceback (six.reraise for py2/py3 compatibility).
                six.reraise(UnreadablePostError, UnreadablePostError(*e.args),
                            sys.exc_info()[2])
            # Keep a rewindable copy so POST parsing can re-read the data.
            self._stream = BytesIO(self._body)
        return self._body

    def _mark_post_parse_error(self):
        """Reset POST/FILES to empty containers and flag the parse failure."""
        self._post_parse_error = True
        self._post = QueryDict('')
        self._files = MultiValueDict()

    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        # Only POST bodies are parsed into form data; other methods get
        # empty POST/FILES containers.
        if self.method != 'POST':
            self._post, self._files = QueryDict(
                '', encoding=self._encoding), MultiValueDict()
            return
        # The stream was consumed without caching the body, so parsing is
        # impossible: record the failure instead of raising here.
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return

        if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                # Stream directly from the request itself.
                data = self
            try:
                self._post, self._files = self.parse_file_upload(
                    self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        elif self.META.get('CONTENT_TYPE',
                           '').startswith('application/x-www-form-urlencoded'):
            # Accessing self.body reads and caches the entire stream.
            self._post, self._files = QueryDict(
                self.body, encoding=self._encoding), MultiValueDict()
        else:
            # Unknown content type: leave POST empty rather than guessing.
            self._post, self._files = QueryDict(
                '', encoding=self._encoding), MultiValueDict()

    def close(self):
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()

    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.

    def read(self, *args, **kwargs):
        """Read from the request stream, converting IOError to UnreadablePostError."""
        # Mark the stream as touched so `body` knows it can no longer cache.
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except IOError as e:
            # Preserve the original traceback while raising the domain error.
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args),
                        sys.exc_info()[2])

    def readline(self, *args, **kwargs):
        """Read one line from the request stream, converting IOError to UnreadablePostError."""
        # Mark the stream as touched so `body` knows it can no longer cache.
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            # Preserve the original traceback while raising the domain error.
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args),
                        sys.exc_info()[2])

    def xreadlines(self):
        """Lazily yield successive lines from the stream until an empty read."""
        while True:
            line = self.readline()
            if not line:
                return
            yield line

    # Iterating a request iterates its lines.
    __iter__ = xreadlines

    def readlines(self):
        """Exhaust the stream and return all remaining lines as a list."""
        return [line for line in self]
 def test_getlist_default(self):
     """A missing key must hand back exactly the caller-supplied default."""
     mvd = MultiValueDict({'a': [1]})
     sentinel = object()
     self.assertIs(mvd.getlist('b', default=sentinel), sentinel)
Exemple #46
0
# Fixture addresses: one syntactically dubious, one redacted-but-wellformed.
bad_email = "someone@nothing"
good_email = "*****@*****.**"

# Faker instance with the mock county-form providers loaded, for generating
# fake form submissions in tests.
fake = FakerFactory.create('en_US', includes=['intake.tests.mock_county_forms'])

# A mostly-empty raw submission shaped like Django's request.POST: a
# MultiValueDict of single-element lists keyed by dotted field names.
RAW_FORM_DATA = MultiValueDict({
    'address.city': [''],
    'address.state': ['CA'],
    'address.street': [''],
    'address.zip': [''],
    'dob.day': [''],
    'dob.month': [''],
    'dob.year': [''],
    'drivers_license_number': [''],
    'email': [''],
    'first_name': [''],
    'how_did_you_hear': [''],
    'last_name': [''],
    'middle_name': [''],
    'monthly_expenses': [''],
    'monthly_income': [''],
    'phone_number': [''],
    'ssn': [''],
    'when_probation_or_parole': [''],
    'when_where_outside_sf': [''],
    'where_probation_or_parole': ['']
})

NEW_RAW_FORM_DATA = {
    'address.city': '',
    'address.state': 'CA',
    'address.street': '',
Exemple #47
0
 def pop(self, key, *args):
     """Pop *key* (with optional default), enforcing mutability first."""
     self._assert_mutable()
     return MultiValueDict.pop(self, key, *args)
Exemple #48
0
 def update(self, other_dict):
     """Merge *other_dict* into self, coercing keys and values to unicode."""
     self._assert_mutable()
     encode = lambda value: str_to_unicode(value, self.encoding)
     converted = dict((encode(k), encode(v)) for k, v in other_dict.items())
     MultiValueDict.update(self, converted)
Exemple #49
0
class MultiPartParser:
    """
    A rfc2388 multipart/form-data parser.

    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.
        """
        # Content-Type should contain multipart and the boundary information.
        content_type = META.get('CONTENT_TYPE', '')
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' %
                                       content_type)

        # Parse the header to get the boundary to split the parts.
        try:
            ctypes, opts = parse_header(content_type.encode('ascii'))
        except UnicodeEncodeError:
            raise MultiPartParserError(
                'Invalid non-ASCII Content-Type in multipart: %s' %
                force_str(content_type))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' %
                                       force_str(boundary))

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('CONTENT_LENGTH', 0))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" %
                                       content_length)

        if isinstance(boundary, str):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [
            x.chunk_size for x in upload_handlers if x.chunk_size
        ]
        self._chunk_size = min([2**31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.
        """
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None
        # Whether a file upload is finished.
        uploaded_file = True

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None
                    uploaded_file = True

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # Malformed part without a usable field name: skip it.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                            and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                            num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                            and num_bytes_read >
                            settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig(
                            'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding,
                                              errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_str(file_name,
                                              encoding,
                                              errors='replace')
                        file_name = self.sanitize_file_name(file_name)
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    uploaded_file = False
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name,
                                    file_name,
                                    content_type,
                                    content_length,
                                    charset,
                                    content_type_extra,
                                )
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 -
                                                                   remaining)
                                    stripped_chunk += b"".join(
                                        over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            if not uploaded_file:
                for handler in handlers:
                    handler.upload_interrupted()
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        self._post._mutable = False
        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_str(old_field_name, self._encoding,
                              errors='replace'), file_obj)
                break

    def sanitize_file_name(self, file_name):
        """Return the client-supplied filename reduced to a safe basename."""
        # Undo HTML entity escaping first so escaped separators cannot hide
        # path components from the checks below.
        file_name = html.unescape(file_name)
        # Cleanup Windows-style path separators.
        file_name = file_name[file_name.rfind('\\') + 1:].strip()
        return os.path.basename(file_name)

    # Backwards-compatible alias for sanitize_file_name.
    IE_sanitize = sanitize_file_name

    def _close_files(self):
        """Close any file objects the upload handlers still hold open."""
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
Exemple #50
0
 def setlist(self, key, list_):
     """Set *key* to the values in *list_*, coercing everything to unicode."""
     self._assert_mutable()
     coerced_key = str_to_unicode(key, self.encoding)
     coerced_values = [str_to_unicode(item, self.encoding) for item in list_]
     MultiValueDict.setlist(self, coerced_key, coerced_values)
def register_or_upload(request):
    """
    PyPI-style ``register``/``upload`` endpoint.

    Creates or looks up the package and release described by request.POST,
    stores the release metadata, and — when a ``content`` file is attached —
    saves the uploaded distribution. Returns an HttpResponse describing the
    outcome.
    """
    if request.method != 'POST':
        return HttpResponseBadRequest('Only post requests are supported')

    # Default to '' (not None) so a missing 'name' falls through to the
    # empty-name check below instead of raising AttributeError on .strip().
    name = request.POST.get('name', '').strip()

    if not name:
        return HttpResponseBadRequest('No package name specified')

    try:
        package = Package.objects.get(name=name)
    except Package.DoesNotExist:
        package = Package.objects.create(name=name)
        package.owners.add(request.user)

    # Only owners/maintainers may touch an existing package (unless the
    # deployment allows global ownership).
    if not (conf.GLOBAL_OWNERSHIP or request.user in package.owners.all()
            or request.user in package.maintainers.all()):

        return HttpResponseForbidden('You are not an owner/maintainer of %s' %
                                     (package.name, ))

    version = request.POST.get('version', None)
    if version:
        version = version.strip()

    release, created = Release.objects.get_or_create(package=package,
                                                     version=version)

    metadata_version = request.POST.get('metadata_version', None)
    if not metadata_version:
        metadata_version = release.metadata_version

    if metadata_version:
        metadata_version = metadata_version.strip()

    if not version or not metadata_version:
        transaction.rollback()
        return HttpResponseBadRequest(
            'Release version and metadata version must be specified')

    if metadata_version not in conf.METADATA_FIELDS:
        transaction.rollback()
        # Bug fix: the '%' operator was missing here, so this branch raised
        # TypeError ("'str' object is not callable") instead of responding.
        return HttpResponseBadRequest('Metadata version must be one of: %s' % (
            ', '.join(conf.METADATA_FIELDS.keys()), ))

    # Presence of 1.1-only fields promotes 1.0 metadata to 1.1.
    if (('classifiers' in request.POST or 'download_url' in request.POST)
            and metadata_version == '1.0'):
        metadata_version = '1.1'

    release.metadata_version = metadata_version

    fields = conf.METADATA_FIELDS[metadata_version]

    if 'classifiers' in request.POST:
        request.POST.setlist('classifier', request.POST.getlist('classifiers'))

    # Keep only the POST keys valid for this metadata version.
    release.package_info = MultiValueDict(
        dict(filter(lambda t: t[0] in fields, request.POST.iterlists())))

    # Drop distutils' 'UNKNOWN' placeholder values.
    for key, value in release.package_info.iterlists():
        release.package_info.setlist(key,
                                     filter(lambda v: v != 'UNKNOWN', value))

    release.save()
    if 'content' not in request.FILES:
        transaction.commit()
        return HttpResponse('release registered')

    uploaded = request.FILES.get('content')

    for dist in release.distributions.all():
        if os.path.basename(dist.content.name) == uploaded.name:
            # Need to add handling optionally deleting old and putting up new
            transaction.rollback()
            return HttpResponseBadRequest(
                'That file has already been uploaded...')

    md5_digest = request.POST.get('md5_digest', '')

    try:
        new_file = Distribution.objects.create(
            release=release,
            file=uploaded,
            filetype=request.POST.get('filetype', 'sdist'),
            pyversion=request.POST.get('pyversion', ''),
            uploader=request.user,
            comment=request.POST.get('comment', ''),
            signature=request.POST.get('gpg_signature', ''),
            md5_digest=md5_digest)
    except Exception as e:
        transaction.rollback()
        print(str(e))
        # NOTE(review): the original code fell off the end (returning None)
        # after a failed upload; callers presumably expect an HttpResponse
        # here — confirm before adding one.
 def test_getlist_doesnt_mutate(self):
     """Augmenting a getlist() result must not change the stored values."""
     multidict = MultiValueDict({'a': ['1', '2'], 'b': ['3']})
     retrieved = multidict.getlist('a')
     retrieved += multidict.getlist('b')
     self.assertEqual(multidict.getlist('a'), ['1', '2'])
Exemple #53
0
    def __init__(
        self,
        data: Optional[Dict[str, Any]] = None,
        files: Optional[Dict[str, File]] = None,
        instance: Optional["BaseRecord"] = None,
        initial: Optional[Mapping[str, Any]] = None,
        prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """
        Build a record form: derive the related BaseForm from data, instance,
        or initial; inject stored field values; and emit pre/post-init
        signals around the normal ModelForm initialization.
        """
        opts = self._meta  # type: ignore

        # Set the prefix if we were given one (we'll need it for a modifier).
        self.prefix = prefix

        # Ensure that data and files are both mutable so that signal handlers
        # can act before the form is initialized.
        data = data.copy() if data is not None else None
        files = files.copy() if files is not None else None

        # If we don't have an instance (e.g., we're adding a new record), we try
        # to derive the BaseForm from the given parameters.
        #
        # If we can't derive a BaseForm (e.g. we're totally unbound and there's
        # no BaseForm identifier in the initial data) then we behave as a
        # totally normal ModelForm, only presenting the BaseForm's concrete
        # attributes.
        RecordModel = opts.model
        FormModel = RecordModel._flexible_model_for(BaseForm)
        form_field_name = RecordModel.FlexibleMeta.form_field_name

        # Try to get some kind of specifier for the BaseForm we should use.
        #
        #   1. Look in the data parameter to see if one was submitted with the
        #      form data.
        #   2. If it wasn't in the form data, see if the instance is related to
        #      a BaseForm.
        #   3. Look in the initial parameter to see if we were given a BaseForm
        #      either manually or by the as_django_form() method on the BaseForm
        #      model.
        #
        # If all of these fail, we'll fall back to behaving as a normal
        # ModelForm: the form will only have fields for the direct model
        # attributes of a BaseRecord until it has a relationship to a BaseForm.
        #
        form = (data or {}).get(form_field_name)
        form = form or getattr(instance, form_field_name, None)
        form = form or (initial or {}).get(form_field_name)

        # Depending on how the RecordForm is being created, the form might be:
        #
        #   1. A ModelChoiceIteratorValue (Django 3.1+), which has an "instance"
        #      property containing the form instance that we unpack.
        #   2. A primary key value (an int or a string) that we use to query
        #      for the BaseForm.
        #   3. None or a BaseForm instance, in which case we do nothing.
        if django.VERSION >= (3, 1) and isinstance(form, ModelChoiceIteratorValue):
            form = form.instance
        if isinstance(form, int) or (isinstance(form, str) and form.isdigit()):
            form = FormModel.objects.get(pk=form)

        # If the form is bound, make sure that data holds a reference to the
        # form object, and disable the form field.
        is_bound = data is not None or files is not None
        if is_bound:
            data = cast(Dict[str, Any], data or MultiValueDict())
            data[form_field_name] = form

        # Inject the instance's _data (form field values) into the initial dict.
        # If we weren't given an instance, we make a new one (but don't persist
        # it) for consistency.
        instance = instance or opts.model(**{form_field_name: form})
        initial = {
            **instance._data,
            **(initial or {}),
            form_field_name: form,
        }

        # If any of the form fields have a "_value" attribute, use it in either
        # the data (if the form is bound) and/or the initial (if the form is
        # unbound).
        modified_fields = {
            k: v for k, v in self.base_fields.items() if hasattr(v, "_value")
        }
        for field_name, field in modified_fields.items():
            field_name = self.add_prefix(field_name)

            try:
                field_value = field._value  # type: ignore
            except AttributeError:
                continue

            # Set the initial value.
            initial[field_name] = field_value

            # For unbound forms, data and files are both None, so we can't set
            # values in them and we continue on.
            if not is_bound:
                continue

            files = cast(Dict[str, File], files or MultiValueDict())
            data = cast(Dict[str, Any], data or MultiValueDict())
            value = data.get(field_name, files.get(field_name))

            # If the field was already assigned a non-empty value, don't try to
            # overwrite it.
            if value not in field.empty_values:
                continue

            # Set the appropriate data element (files for FileFields, data for
            # everything else) to the field's new value.
            if isinstance(field, forms.FileField):
                files[field_name] = field_value
            else:
                data[field_name] = field_value

            # Unset the initial value so that the automatically-set value is
            # detected as a change when the form is saved.
            initial.pop(field_name, None)

        # Emit a signal before initializing the form.
        pre_form_init.send(
            sender=self.__class__,
            form=self,
            data=data,
            files=files,
            instance=instance,
            initial=initial,
        )

        # Initialize the form as usual.
        super().__init__(
            data=data, files=files, instance=instance, initial=initial, **kwargs
        )

        # Hide and disable the form input if the BaseRecord is already persisted
        # with a relationship to its BaseForm.
        if form is not None and form_field_name in self.fields:
            form_field = self.fields[form_field_name]
            form_field.widget = HiddenInput()
            form_field.disabled = instance.pk and getattr(
                instance, f"{form_field_name}_id", None
            )

        # Emit a signal after initializing the form.
        post_form_init.send(
            sender=self.__class__,
            form=self,
        )
 def test_multivaluedict(self):
     """Exercise MultiValueDict's scalar, list, and mutation accessors."""
     mvd = MultiValueDict({
         'name': ['Adrian', 'Simon'],
         'position': ['Developer']
     })
     # Scalar access returns the last value stored under a key.
     self.assertEqual(mvd['name'], 'Simon')
     self.assertEqual(mvd.get('name'), 'Simon')
     self.assertEqual(mvd.getlist('name'), ['Adrian', 'Simon'])
     self.assertEqual(sorted(mvd.items()),
                      [('name', 'Simon'), ('position', 'Developer')])
     self.assertEqual(sorted(mvd.lists()),
                      [('name', ['Adrian', 'Simon']),
                       ('position', ['Developer'])])
     # Missing keys raise MultiValueDictKeyError on item access...
     with self.assertRaises(MultiValueDictKeyError) as cm:
         mvd.__getitem__('lastname')
     self.assertEqual(str(cm.exception), "'lastname'")
     # ...but get()/getlist() fall back to defaults.
     self.assertIsNone(mvd.get('lastname'))
     self.assertEqual(mvd.get('lastname', 'nonexistent'), 'nonexistent')
     self.assertEqual(mvd.getlist('lastname'), [])
     self.assertEqual(mvd.getlist('doesnotexist', ['Adrian', 'Simon']),
                      ['Adrian', 'Simon'])
     mvd.setlist('lastname', ['Holovaty', 'Willison'])
     self.assertEqual(mvd.getlist('lastname'), ['Holovaty', 'Willison'])
     self.assertEqual(sorted(mvd.values()),
                      ['Developer', 'Simon', 'Willison'])
Exemple #55
0
def post_data(**kwargs):
    """Build a MultiValueDict from keyword args, wrapping bare strings in lists."""
    normalized = {
        key: [value] if isinstance(value, str) else value
        for key, value in kwargs.items()
    }
    return MultiValueDict(normalized)
    def test_get_value_multi_dictionary_full(self):
        """get_value() returns the full list for a present key, [] otherwise."""
        present = MultiValueDict({'foo': ['bar1', 'bar2']})
        assert self.field.get_value(present) == ['bar1', 'bar2']

        absent = MultiValueDict({'baz': ['bar1', 'bar2']})
        assert self.field.get_value(absent) == []
Exemple #57
0
class EmailBaseModel(models.Model):
    """Abstract base for incoming email messages.

    Stores the raw Mailgun payload fields plus two JSON text blobs:
    ``message_headers`` (a list of ``[key, value]`` pairs) and
    ``content_id_map`` (Content-ID -> attachment mapping). Both are parsed
    lazily and cached on the instance.
    """

    sender = models.EmailField(_("sender"), max_length=255)
    from_str = models.CharField(_("from"), max_length=255)
    recipient = models.CharField(_("recipient"), max_length=255)
    subject = models.CharField(_("subject"), max_length=255, blank=True)
    body_plain = models.TextField(_("body plain"), blank=True)
    body_html = models.TextField(_("body html"), blank=True)
    stripped_text = models.TextField(_("stripped text"), blank=True)
    stripped_html = models.TextField(_("stripped html"), blank=True)
    stripped_signature = models.TextField(_("stripped signature"), blank=True)
    message_headers = models.TextField(_("message headers"),
                                       blank=True,
                                       help_text=_("Stored in JSON."))
    content_id_map = models.TextField(
        _("Content-ID map"),
        blank=True,
        help_text=
        _("Dictionary mapping Content-ID (CID) values to corresponding attachments. Stored in JSON."
          ))
    received = models.DateTimeField(_("received"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _("incoming email")
        verbose_name_plural = _("incoming emails")

    def __init__(self, *args, **kwargs):
        super(EmailBaseModel, self).__init__(*args, **kwargs)
        # Lazy parse caches; None means "not parsed yet".
        self._headers = None
        self._cids = None

    def _load_headers(self):
        """Parse ``message_headers`` JSON into a MultiValueDict cache.

        Headers may repeat (e.g. Received), hence the MultiValueDict.
        On a parse error the cache is left as an empty MultiValueDict.
        """
        self._headers = MultiValueDict()
        try:
            header_list = json.loads(self.message_headers)
            for key, val in header_list:
                self._headers.appendlist(key, val)
        # json.loads raises ValueError (JSONDecodeError) on bad JSON and
        # TypeError on non-string input; a bare except here would also
        # swallow SystemExit/KeyboardInterrupt.
        except (TypeError, ValueError):
            logger.exception(
                "Error parsing JSON data containing message headers")

    @property
    def headers(self):
        """Access message_headers parsed into MultiValueDict"""
        if self._headers is None:
            self._load_headers()
        return self._headers

    def _load_cids(self):
        """Parse ``content_id_map`` JSON into the ``_cids`` dict cache."""
        if not self.content_id_map:
            # Nothing to parse; previously json.loads('') ran unconditionally
            # here and logged a spurious parse error.
            return
        self._cids = {}
        try:
            self._cids = json.loads(self.content_id_map)
        except (TypeError, ValueError):
            logger.exception("Error parsing JSON data containing Content-IDs")

    @property
    def content_ids(self):
        """Access content_id_map as dict"""
        if not self.content_id_map:
            return
        if self._cids is None:
            self._load_cids()
        return self._cids

    @property
    def message_id(self):
        # Last Message-Id header value, or None when absent.
        return self.headers.get('Message-Id', None)

    @property
    def cc(self):
        return self.headers.get('Cc', None)

    @property
    def references(self):
        return self.headers.get('References', None)

    @property
    def in_reply_to(self):
        return self.headers.get('In-Reply-To', None)

    @property
    def site_url(self):
        # Absolute URL of this message on the current site.
        return 'https://%s%s' % (Site.objects.get_current().domain,
                                 reverse('comlink:mail',
                                         kwargs={'id': self.id}))

    def get_mailgun_data(self, stripped=True, footer=True):
        """Build the Mailgun send payload for this message.

        stripped -- use the signature-stripped bodies instead of the raw ones.
        footer -- append a "Sent through Nadine" footer with a link back.
        """
        if stripped:
            body_plain = self.stripped_text
            body_html = self.stripped_html
        else:
            body_plain = self.body_plain
            body_html = self.body_html

        if footer:
            # Add in a footer
            text_footer = "\n\n-------------------------------------------\n*~*~*~* Sent through Nadine *~*~*~*\n%s" % self.site_url
            body_plain = body_plain + text_footer
            if body_html:
                html_footer = "<br><br>-------------------------------------------<br>*~*~*~* Sent through Nadine *~*~*~*\n%s" % self.site_url
                body_html = body_html + html_footer

        # Build and return our data. When there is no Cc header, send an
        # empty list instead of [None] (which Mailgun would reject).
        mailgun_data = {
            "from": self.from_str,
            "to": [
                self.recipient,
            ],
            "cc": [self.cc] if self.cc else [],
            "subject": self.subject,
            "text": body_plain,
            "html": body_html,
        }
        return mailgun_data

    def __str__(self):
        return _("Message from {from_str}: {subject_trunc}").format(
            from_str=self.from_str, subject_trunc=self.subject[:20])
Exemple #58
0
def test_multivalue_dict():
    """multidict_to_dict keeps single-valued keys scalar and collects
    repeated keys into lists."""
    source = MultiValueDict()
    for payload in ({'a': 'b', 'b': 'd'}, {'a': 'c', 'e': 'f'}):
        source.update(payload)
    converted = compat.multidict_to_dict(source)
    assert converted == {'a': ['b', 'c'], 'b': 'd', 'e': 'f'}
Exemple #59
0
 def setdefault(self, key, default=None):
     """Unicode-coerce key and default, then delegate to MultiValueDict."""
     self._assert_mutable()
     # Coercion order preserved: key first, then default.
     unicode_key = str_to_unicode(key, self.encoding)
     unicode_default = str_to_unicode(default, self.encoding)
     return MultiValueDict.setdefault(self, unicode_key, unicode_default)
Exemple #60
0
 def __setitem__(self, key, value):
     """Store *value* under *key*, coercing both to unicode first."""
     self._assert_mutable()
     # Coercion order preserved: key first, then value.
     unicode_key = str_to_unicode(key, self.encoding)
     unicode_value = str_to_unicode(value, self.encoding)
     MultiValueDict.__setitem__(self, unicode_key, unicode_value)