예제 #1
0
 def get_files(self):
     """Collect every file uploaded across the wizard's steps.

     Reopens each stored file from ``self.file_storage`` and returns a
     MultiValueDict keyed '<step>-file', so one step may carry several
     files.
     """
     files = MultiValueDict({})
     for step, step_data in self.request.session[self.prefix][self.step_data_session_key].items():
         # 'files' is only present for steps that actually uploaded something.
         # (Was `step_data.has_key(...)`, which no longer exists in Python 3.)
         if 'files' in step_data:
             for file_info in step_data.getlist('files'):
                 files.appendlist(step+'-file', self.file_storage.open(file_info.get('path')))
     return files
예제 #2
0
파일: __init__.py 프로젝트: 0xmilk/appscale
def parse_file_upload(header_dict, post_data):
    """Returns a tuple of (POST MultiValueDict, FILES MultiValueDict).

    Reconstructs a MIME document from the request headers and body,
    parses it with the email package, and routes each part to FILES
    (when its Content-Disposition carries a filename) or POST otherwise.
    """
    import email, email.Message
    from cgi import parse_header
    raw_message = '\r\n'.join(['%s:%s' % pair for pair in header_dict.items()])
    raw_message += '\r\n\r\n' + post_data
    msg = email.message_from_string(raw_message)
    POST = MultiValueDict()
    FILES = MultiValueDict()
    for submessage in msg.get_payload():
        if submessage and isinstance(submessage, email.Message.Message):
            name_dict = parse_header(submessage['Content-Disposition'])[1]
            # name_dict is something like {'name': 'file', 'filename': 'test.txt'} for file uploads
            # or {'name': 'blah'} for POST fields
            # We assume all uploaded files have a 'filename' set.
            # (Was `name_dict.has_key(...)`; `in` works on Python 2 and 3.)
            if 'filename' in name_dict:
                # A list payload would mean a nested multipart message.
                assert not isinstance(submessage.get_payload(), list), "Nested MIME messages are not supported"
                if not name_dict['filename'].strip():
                    continue
                # IE submits the full path, so trim everything but the basename.
                # (We can't use os.path.basename because it expects Linux paths.)
                filename = name_dict['filename'][name_dict['filename'].rfind("\\")+1:]
                FILES.appendlist(name_dict['name'], {
                    'filename': filename,
                    # Message.__getitem__ returns None for a missing header;
                    # `or None` also maps an empty header to None, matching
                    # the original has_key()-and-or expression exactly.
                    'content-type': (submessage['Content-Type'] or None),
                    'content': submessage.get_payload(),
                })
            else:
                POST.appendlist(name_dict['name'], submessage.get_payload())
    return POST, FILES
예제 #3
0
 def test_encode_multipart_data_multiple_params(self):
     """Repeated names in params/files sequences are all encoded."""
     # encode_multipart_data() accepts sequences of pairs, so the same
     # name may appear several times among parameters and files.
     params_in = [
         ("one", "ABC"),
         ("one", "XYZ"),
         ("two", "DEF"),
         ("two", "UVW"),
         ]
     files_in = [
         ("f-one", BytesIO(urandom(32))),
         ("f-two", BytesIO(urandom(32))),
         ]
     body, headers = encode_multipart_data(params_in, files_in)
     self.assertEqual(str(len(body)), headers["Content-Length"])
     self.assertThat(
         headers["Content-Type"],
         StartsWith("multipart/form-data; boundary="))
     # Round-trip through Django's multipart parsing code.
     params_out, files_out = parse_headers_and_body_with_django(headers, body)
     expected_params = MultiValueDict()
     for key, val in params_in:
         expected_params.appendlist(key, val)
     self.assertEqual(expected_params, params_out, ahem_django_ahem)
     self.assertSetEqual({"f-one", "f-two"}, set(files_out))
     expected_files = {key: stream.getvalue() for key, stream in files_in}
     observed_files = {key: stream.read() for key, stream in files_out.items()}
     self.assertEqual(expected_files, observed_files, ahem_django_ahem)
예제 #4
0
def parse_file_upload(header_dict, post_data):
    """Parse a multipart upload into (POST QueryDict, FILES MultiValueDict).

    Headers and body are glued back into one MIME document, parsed with
    the email package, and each part is routed to FILES when it carries
    a filename, or to POST otherwise.
    """
    import email, email.Message
    from cgi import parse_header
    header_lines = ['%s:%s' % item for item in header_dict.items()]
    raw_message = '\r\n'.join(header_lines) + '\r\n\r\n' + post_data
    message = email.message_from_string(raw_message)
    POST = QueryDict('', mutable=True)
    FILES = MultiValueDict()
    for part in message.get_payload():
        if not (part and isinstance(part, email.Message.Message)):
            continue
        disposition = parse_header(part['Content-Disposition'])[1]
        # disposition looks like {'name': 'file', 'filename': 'test.txt'}
        # for uploads, or just {'name': 'blah'} for ordinary POST fields;
        # an uploaded file is assumed to always carry a 'filename'.
        if 'filename' not in disposition:
            POST.appendlist(disposition['name'], part.get_payload())
            continue
        assert type([]) != type(part.get_payload()), "Nested MIME messages are not supported"
        if not disposition['filename'].strip():
            continue
        # IE submits the client's full path; keep just the basename.
        # (os.path.basename is no help: it uses the server's directory
        # separator, which may differ from the client's.)
        raw_name = disposition['filename']
        filename = raw_name[raw_name.rfind("\\")+1:]
        FILES.appendlist(disposition['name'], FileDict({
            'filename': filename,
            'content-type': 'Content-Type' in part and part['Content-Type'] or None,
            'content': part.get_payload(),
        }))
    return POST, FILES
예제 #5
0
 def status_update(self):
     """Send one digest e-mail per user covering their unsent tag notifications.

     Groups pending TagNotification rows by recipient e-mail, renders a
     digest per user, and marks the notifications as sent only after a
     successful delivery.
     """
     self.debug('{0} running'.format(SCHEDULE_DESC))
     pending = TagNotification.objects.filter(sent=False)
     pending = pending.select_related().order_by('tag', 'entry')
     self.info('found {0} notifications'.format(pending.count()))
     # Group notifications by the recipient's e-mail address.
     users = {}
     for notification in pending:
         users.setdefault(notification.user.email, []).append(notification)
     for email, notifications in users.items():
         tags = MultiValueDict()
         for notification in notifications:
             tags.appendlist(notification.tag, notification)
         body = render_to_string('tree/digest.txt', {'tags': tags})
         try:
             send_mail(subject='Survey Response Report', message=body,
                       recipient_list=[email],
                       from_email='*****@*****.**',
                       fail_silently=False)
             sent = True
         # `as e` replaces the Python-2-only `, e` syntax (valid 2.6+).
         except smtplib.SMTPException as e:
             self.exception(e)
             sent = False
         if sent:
             for notification in notifications:
                 notification.sent = True
                 notification.date_sent = datetime.datetime.now()
                 notification.save()
             self.info('Sent report to %s' % email)
예제 #6
0
파일: box.py 프로젝트: 3108as/ella
 def resolve_params(self, text):
     """Parse ``key: value`` lines of *text* into a MultiValueDict."""
     params = MultiValueDict()
     for raw_line in text.split('\n'):
         key_value = raw_line.split(':', 1)
         if len(key_value) != 2:
             # Lines without a colon carry no parameter; skip them.
             continue
         key, value = key_value
         params.appendlist(key.strip(), value.strip())
     return params
예제 #7
0
def _do_export(survey, subject_type, pk):
    """Collect all Fact data for one subject of *survey* into a MultiValueDict.

    Keys are desired-fact codes; facts sharing a code accumulate as a
    list via appendlist.
    """
    fact_qs = (models.Fact.objects
               .filter(survey=survey, content_type=subject_type, object_id=pk)
               .select_related('desired_fact'))

    exported = MultiValueDict()
    for fact in fact_qs:
        exported.appendlist(fact.desired_fact.code, fact.data)
    return exported
예제 #8
0
def jquery_to_dict(values):
    """Convert jQuery serializeArray()-style data into a MultiValueDict.

    *values* is expected to be a list/tuple/set of dicts each holding
    'name' and 'value' keys; anything else is silently ignored.
    """
    result = MultiValueDict()
    if isinstance(values, (list, tuple, set)):
        for entry in values:
            if isinstance(entry, dict) and 'name' in entry and 'value' in entry:
                result.appendlist(entry['name'], entry['value'])
    return result
예제 #9
0
    def _build_sub_query(self, search_node):
        """Recursively flatten *search_node* into a MultiValueDict of terms."""
        terms = MultiValueDict()

        for child in search_node.children:
            if isinstance(child, SearchNode):
                # Nested node: merge its flattened terms into ours.
                terms.update(self._build_sub_query(child))
            else:
                # Leaf: a (field, value) pair.
                field, value = child[0], child[1]
                terms.appendlist(field, value)

        return terms
예제 #10
0
class MakeMonthArrayNode (Node):
    """Template node that builds a month-calendar grid from object_list.

    Expects ``month`` (a date) and ``object_list`` in the render context
    and stores the resulting list of weeks in ``context['month_array']``.
    """

    def __init__ (self):
        self.year = None
        self.month = None
        # Maps day-of-month -> list of objects whose startdate is that day.
        self.data = MultiValueDict()
        self._weekday = None    # Weekday (Monday==0) for day 1 in this month.
        self._n_days = None     # Number of days in month.


    def _n_added_days(self):
        """Number of added days at the end of the month to have a "full week"."""
        return ( 7 - (self._weekday + self._n_days)%7 ) % 7


    def __list2array__ (self, l, ncols=7):
        """Splits list l in lists of len()=cols. l must be "squareable".
        """
        # Floor division keeps the row count an int (plain `/` would make
        # range() fail under Python 3); `//` behaves identically on Python 2.
        return [ l[row*ncols:(row+1)*ncols] for row in range(0, len(l)//ncols) ]


    def squared (self):
        """Inserts keys and values in an array of days (month's calendar).

        Returns a list of weeks.
        One week, one list of days.
        One day, one list with [ <day-number> , [<data for this day>] ]
        """

        # Insert [None] in remaining days. IN PLACE.
        # (Was `self.data.has_key(i)`, which no longer exists in Python 3.)
        for i in range(1, self._n_days+1):
            if i not in self.data:
                self.data[i] = None

        # zerofill out-of-month days
        before = self._weekday * [[0,[None]]]
        after = self._n_added_days() * [[0,[None]]]
        # cat three lists and split per weeks
        return self.__list2array__ (before + self.data.lists() + after)



    def render(self, context):
        try:
            self.year = context['month'].year
            self.month = context['month'].month
            self._weekday, self._n_days = monthrange(self.year, self.month)

            for o in context['object_list']:
                self.data.appendlist (o.startdate.day, o)

            context['month_array'] = self.squared()
        except Exception:
            # Template nodes must never raise into the render pipeline;
            # on any failure 'month_array' is simply left unset. (Was a
            # bare `except:`, which would also swallow SystemExit.)
            pass
        return ''
예제 #11
0
    def existing_facts(survey, subject, prefix=None):
        """Return every Fact for (survey, subject) as a form-bindable MultiValueDict.

        When *prefix* is given, keys are emitted as '<prefix>-<code>' so
        they line up with prefixed form field names.
        """
        content_type = ContentType.objects.get_for_model(subject)
        fact_qs = (Fact.objects
                   .filter(survey=survey, content_type=content_type,
                           object_id=subject.id)
                   .select_related('desired_fact'))

        # With no (or empty) prefix the keys are just the bare codes.
        key_format = (prefix and "%s-%s" % (prefix, "%s")) or "%s"
        bound = MultiValueDict()
        for fact in fact_qs:
            bound.appendlist(key_format % fact.desired_fact.code, fact.data)

        return bound
예제 #12
0
def replace_esi_tags(request, response):
    """Inline every ESI include tag found in *response* with its fragment.

    Each ``esi_tag_re`` match in ``response.content`` is replaced by the
    content fetched from its URL via an internal test-client request.
    Fragment headers named in HEADERS_TO_MERGE and fragment cookies are
    collected and merged back onto *response* afterwards. Mutates
    *response* in place; returns None.
    """
    process_errors = getattr(settings, 'ESI_PROCESS_ERRORS', False)
    fragment_headers = MultiValueDict()
    fragment_cookies = []
    # Attributes for the internal fragment fetch: forward the user's
    # cookies, point the referer at the page being assembled, and mark
    # the request as an ESI sub-request.
    request_data = {
        'cookies': request.COOKIES,
        'HTTP_REFERER': request.build_absolute_uri(),
        'HTTP_X_ESI_FRAGMENT': True,
    }

    # Match positions come from the ORIGINAL content, but each splice
    # changes the content length; replacement_offset tracks the running
    # difference so later matches land at the right place.
    replacement_offset = 0
    for match in esi_tag_re.finditer(response.content):
        url = build_full_fragment_url(request, match.group('url'))

        if response.status_code == 200 or process_errors:
            client = http_client.Client(**request_data)
            fragment = client.get(url)
        else:
            # Error page and ESI_PROCESS_ERRORS is off: substitute an
            # empty response instead of fetching the fragment.
            fragment = HttpResponse()

        if fragment.status_code != 200:
            extra = {'data': {
                'fragment': fragment.__dict__,
                'request': request.__dict__,
            }}
            log.error('ESI fragment %s returned status code %s' %
                (url, fragment.status_code), extra=extra)
            # Remove the error content so it isn't added to the page.
            fragment.content = ''

        # Splice the fragment over the tag and update the offset by the
        # size difference between fragment and tag.
        start = match.start() + replacement_offset
        end = match.end() + replacement_offset
        response.content = '%s%s%s' % (response.content[:start],
            fragment.content, response.content[end:])
        replacement_offset += len(fragment.content) - len(match.group(0))

        for header in HEADERS_TO_MERGE:
            if header in fragment:
                fragment_headers.appendlist(header, fragment[header])
        if fragment.cookies:
            fragment_cookies.append(fragment.cookies)

    merge_fragment_headers(response, fragment_headers)
    merge_fragment_cookies(response, fragment_cookies)
예제 #13
0
    def handle_raw_input(self, input_data, META, content_length, boundary,
            encoding=None):
        """Short-circuit multipart parsing for raw AJAX (XHR) file uploads.

        For XMLHttpRequest uploads the request body is the bare file, not
        a multipart document: read the filename from the X-File-Name
        header, stream the body through the normal upload-handler chain,
        and return a ready-made (POST, FILES) pair. Returns None for
        non-AJAX requests so regular multipart parsing proceeds.

        Raises MultiPartParserError when the filename header is missing
        or empty after sanitizing.
        """
        super(AjaxUploadHandlerMixin, self).handle_raw_input(input_data, META,
                content_length, boundary, encoding)
        if META.get('HTTP_X_REQUESTED_WITH') != 'XMLHttpRequest':
            return # request is not an ajax request
        if not getattr(self, 'activated', True):
            return # file is too large for MemoryFileUploadHandler

        # filename is set as a header as well as a GET param
        file_name = META.get('HTTP_X_FILE_NAME', META.get('X_FILE_NAME'))
        if file_name is None:
            raise MultiPartParserError('Invalid File-Name: %r' % file_name)

        # clean the filename, taken from the default MultiPartParser
        file_name = force_unicode(file_name, encoding, errors='replace')
        file_name = self.IE_sanitize(unquote(file_name))
        if not file_name:
            raise MultiPartParserError('Invalid File-Name: %r' % file_name)

        # the javascript does not try to set the content_type
        # so we guess it from the filename (the encoding guess is unused).
        content_type, _encoding = mimetypes.guess_type(file_name)

        chunk_start = 0
        try:
            self.new_file(self.field_name, file_name, content_type,
                    content_length, encoding)
        except StopFutureHandlers:
            pass # expected from MemoryFileUploadHandler

        # feed the file to the real upload handler.
        for chunk in ChunkIter(input_data, self.chunk_size):
            chunk_length = len(chunk)
            self.receive_data_chunk(chunk, chunk_start)
            chunk_start += chunk_length
        file_obj = self.file_complete(chunk_start)

        # create the POST and FILES datastructures.
        # Fix: QueryDict's first argument is a query *string*; passing a
        # MultiValueDict was wrong. An empty string yields the intended
        # empty POST.
        post = QueryDict('', encoding=encoding)
        files = MultiValueDict()
        if file_obj:
            files.appendlist(self.field_name, file_obj)
        return post, files
 def test_encode_multipart_data_multiple_params(self):
     """Duplicate names and varied value/file forms are all encoded."""
     # encode_multipart_data() accepts repeated parameter names, list
     # values, plain streams, and callables returning streams; see
     # `make_payloads` for how each value type is processed.
     params_in = [
         ("one", "ABC"),
         ("one", "XYZ"),
         ("two", ["DEF", "UVW"]),
     ]
     streams = [
         BytesIO(b"f1"),
         open(self.make_file(contents=b"f2"), "rb"),
         open(self.make_file(contents=b"f3"), "rb"),
     ]
     for stream in streams:
         self.addCleanup(stream.close)
     files_in = [
         ("f-one", streams[0]),
         ("f-two", streams[1]),
         ("f-three", lambda: streams[2]),
     ]
     body, headers = encode_multipart_data(params_in, files_in)
     self.assertEqual(str(len(body)), headers["Content-Length"])
     self.assertThat(
         headers["Content-Type"],
         StartsWith("multipart/form-data; boundary="))
     # Round-trip through Django's multipart parsing code.
     params_out, files_out = parse_headers_and_body_with_django(headers, body)
     expected_params = MultiValueDict()
     for key, val in [("one", "ABC"), ("one", "XYZ"),
                      ("two", "DEF"), ("two", "UVW")]:
         expected_params.appendlist(key, val)
     self.assertEqual(
         expected_params, params_out,
         ahem_django_ahem)
     expected_files = {"f-one": b"f1", "f-two": b"f2", "f-three": b"f3"}
     observed_files = {name: buf.read() for name, buf in files_out.items()}
     self.assertEqual(
         expected_files, observed_files,
         ahem_django_ahem)
예제 #15
0
    def test_save_valid_multiple_choice(self):
        """save_valid() persists one Fact per selected multiple-choice option."""
        # Switch the desired fact to multiple-choice and give it two options.
        self.desired_fact.data_type = 'M'
        self.desired_fact.save()
        for code, description in (('01', '1'), ('02', '2')):
            FactOption.objects.create(desired_fact=self.desired_fact,
                    code=code, description=description)

        # Simulate a POST selecting both options for the same field.
        cls = forms.make_survey_form_subclass(self.survey, self.content_type)
        post_data = MultiValueDict()
        for choice in ('01', '02'):
            post_data.appendlist('code1', choice)
        cls(self.survey, self.subject, self.user, data=post_data).save_valid()

        # Each selection became its own Fact row.
        for saved in ('01', '02'):
            self.assertTrue(Fact.objects.filter(
                desired_fact=self.desired_fact, data=saved).exists())
예제 #16
0
def spurl(parser, token):
    """Parse the {% spurl %} template tag.

    Usage: ``{% spurl name=value ... [as var] %}``. Names may repeat, so
    arguments are collected in a MultiValueDict of compiled filter
    expressions; an optional trailing 'as var' stores the result in a
    context variable instead of rendering it.

    Raises TemplateSyntaxError on missing or malformed arguments.
    """
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError("'spurl' takes at least one argument")

    kwargs = MultiValueDict()
    asvar = None
    bits = bits[1:]

    # Optional trailing "as varname".
    if len(bits) >= 2 and bits[-2] == 'as':
        asvar = bits[-1]
        bits = bits[:-2]

    for bit in bits:
        match = kwarg_re.match(bit)
        # Fix: a non-matching bit used to raise AttributeError from
        # None.groups(); report it as a template syntax error instead.
        if match is None:
            raise TemplateSyntaxError("Malformed arguments to spurl tag")
        name, value = match.groups()
        if not (name and value):
            raise TemplateSyntaxError("Malformed arguments to spurl tag")
        kwargs.appendlist(name, parser.compile_filter(value))
    return SpurlNode(kwargs, asvar)
예제 #17
0
파일: views.py 프로젝트: julienr/myzivi
def index(request):
    """Search landing page: build the API query URLs for a valid search."""
    # Save current language in session (in case the user used a i18n url)
    request.session['django_language'] = translation.get_language()

    date_min, date_max = DateRange.get_min_max()
    form = SearchForm(date_min, date_max, request.GET)
    if not form.is_valid():
        return redirect('index')

    cd = form.cleaned_data

    # Show the intro message only on the user's first visit.
    first_time_message = not request.session.get('visited', False)
    request.session['visited'] = True

    workspec_search_url = reverse('api_dispatch_list', kwargs={
        'api_name': 'v1',
        'resource_name': 'workspec_search'})

    # Build the API query; repeated keys use appendlist so that
    # urlencode(doseq=True) emits one pair per value.
    query = MultiValueDict()
    query['format'] = 'json'
    for language in cd['languages']:
        query.appendlist('language__in', language)
    for domain in cd['domains']:
        query.appendlist('activity_domain__in', domain)
    if cd['start_min']:
        query['start_min'] = cd['start_min']
    if cd['end_max']:
        query['end_max'] = cd['end_max']
    workspec_search_url += '?' + urlencode(query, doseq=True)

    address_url = reverse('api_dispatch_list', kwargs={
        'api_name': 'v1',
        'resource_name': 'address'})

    context = {'search_form' : form,
               'first_time_message' : first_time_message,
               'workspec_search_url' : workspec_search_url,
               'address_url': address_url}
    return render(request, 'index.html', context)
예제 #18
0
 def _populate(self):
     """Build the reverse/namespace/app lookup tables for the active language.

     Walks url_patterns in reverse; each URLPattern is registered in the
     reverse dict under both its callback and (if set) its name, while a
     nested URLResolver is either recorded under its namespace or, when
     anonymous, flattened into this resolver's lookups with its pattern
     prefix, default kwargs, and converters merged in.
     """
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     if getattr(self._local, 'populating', False):
         return
     try:
         self._local.populating = True
         lookups = MultiValueDict()
         namespaces = {}
         apps = {}
         language_code = get_language()
         for url_pattern in reversed(self.url_patterns):
             p_pattern = url_pattern.pattern.regex.pattern
             # Strip the leading anchor so prefixes concatenate cleanly.
             if p_pattern.startswith('^'):
                 p_pattern = p_pattern[1:]
             if isinstance(url_pattern, URLPattern):
                 self._callback_strs.add(url_pattern.lookup_str)
                 bits = normalize(url_pattern.pattern.regex.pattern)
                 # Reversible both by callback object and by name.
                 lookups.appendlist(
                     url_pattern.callback,
                     (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters)
                 )
                 if url_pattern.name is not None:
                     lookups.appendlist(
                         url_pattern.name,
                         (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters)
                     )
             else:  # url_pattern is a URLResolver.
                 url_pattern._populate()
                 if url_pattern.app_name:
                     apps.setdefault(url_pattern.app_name, []).append(url_pattern.namespace)
                     namespaces[url_pattern.namespace] = (p_pattern, url_pattern)
                 else:
                     # Anonymous include: hoist nested entries into this
                     # resolver, prefixing each pattern and merging
                     # defaults and converters (child values win).
                     for name in url_pattern.reverse_dict:
                         for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(name):
                             new_matches = normalize(p_pattern + pat)
                             lookups.appendlist(
                                 name,
                                 (
                                     new_matches,
                                     p_pattern + pat,
                                     {**defaults, **url_pattern.default_kwargs},
                                     {**self.pattern.converters, **url_pattern.pattern.converters, **converters}
                                 )
                             )
                     for namespace, (prefix, sub_pattern) in url_pattern.namespace_dict.items():
                         current_converters = url_pattern.pattern.converters
                         sub_pattern.pattern.converters.update(current_converters)
                         namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                     for app_name, namespace_list in url_pattern.app_dict.items():
                         apps.setdefault(app_name, []).extend(namespace_list)
                 self._callback_strs.update(url_pattern._callback_strs)
         self._namespace_dict[language_code] = namespaces
         self._app_dict[language_code] = apps
         self._reverse_dict[language_code] = lookups
         self._populated = True
     finally:
         self._local.populating = False
예제 #19
0
 def _populate(self):
     """Build the reverse, namespace, and app lookup dicts for the current language.

     A nested RegexURLResolver is recorded under its namespace or, when
     anonymous, flattened into this resolver's lookups with its pattern
     prefix and default kwargs merged in; plain patterns are registered
     under both their callback and name.
     """
     lookups = MultiValueDict()
     namespaces = {}
     apps = {}
     language_code = get_language()
     for pattern in reversed(self.url_patterns):
         p_pattern = pattern.regex.pattern
         # Strip the leading anchor so prefixes concatenate cleanly.
         if p_pattern.startswith('^'):
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name, []).append(pattern.namespace)
             else:
                 # Anonymous include: hoist its reverse entries into this
                 # resolver, combining every child match with our prefix.
                 parent = normalize(pattern.regex.pattern)
                 for name in pattern.reverse_dict:
                     for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                         new_matches = []
                         for piece, p_args in parent:
                             new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                         lookups.appendlist(name, (new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs)))
                 for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
         else:
             # Plain URL pattern: reversible by callback and, if set, by name.
             bits = normalize(p_pattern)
             lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
             if pattern.name is not None:
                 lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
     self._reverse_dict[language_code] = lookups
     self._namespace_dict[language_code] = namespaces
     self._app_dict[language_code] = apps
예제 #20
0
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Render the 'command' field as a <select> of commands grouped by app."""
        request = kwargs.pop("request", None)

        if db_field.name == 'command':
            # Group the available management commands by providing app.
            commands_by_app = MultiValueDict()
            for command, app in get_commands().items():
                commands_by_app.appendlist(app, command)

            # Build grouped choices: [app, [[command, command], ...]],
            # commands sorted alphabetically within each app.
            choices = []
            for app in commands_by_app.keys():
                app_commands = sorted(commands_by_app.getlist(app))
                choices.append([app, [[c, c] for c in app_commands]])

            kwargs['widget'] = forms.widgets.Select(choices=choices)
            return db_field.formfield(**kwargs)

        kwargs['request'] = request
        return super(JobAdmin, self).formfield_for_dbfield(db_field, **kwargs)
예제 #21
0
class RegexURLResolver(object):
    """Resolve URL paths against a urlconf module (Python 2-era Django).

    ``resolve`` maps a path to a (callback, args, kwargs) triple;
    ``reverse_dict`` is a lazily built MultiValueDict mapping names and
    callbacks to the bits needed for URL reversing.
    """

    def __init__(self, regex, urlconf_name, default_kwargs=None):
        # regex is a string representing a regular expression.
        # urlconf_name is a string representing the module containing urlconfs.
        self.regex = re.compile(regex, re.UNICODE)
        self.urlconf_name = urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        # Lazily populated by _get_reverse_dict().
        self._reverse_dict = MultiValueDict()

    def __repr__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self.urlconf_name, self.regex.pattern)

    def _get_reverse_dict(self):
        """Build (once) and return the name/callback -> pattern-bits mapping."""
        if not self._reverse_dict and hasattr(self.urlconf_module, 'urlpatterns'):
            try:
                for pattern in reversed(self.urlconf_module.urlpatterns):
                    p_pattern = pattern.regex.pattern
                    # Strip the leading anchor so prefixes concatenate cleanly.
                    if p_pattern.startswith('^'):
                        p_pattern = p_pattern[1:]
                    if isinstance(pattern, RegexURLResolver):
                        # Nested resolver: combine its entries with our prefix.
                        parent = normalize(pattern.regex.pattern)
                        for name in pattern.reverse_dict:
                            for matches, pat in pattern.reverse_dict.getlist(name):
                                new_matches = []
                                for piece, p_args in parent:
                                    new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                                self._reverse_dict.appendlist(name, (new_matches, p_pattern + pat))
                    else:
                        bits = normalize(p_pattern)
                        self._reverse_dict.appendlist(pattern.callback, (bits, p_pattern))
                        self._reverse_dict.appendlist(pattern.name, (bits, p_pattern))
            except:
                # Don't leave a half-built cache behind; clear and re-raise.
                self._reverse_dict.clear()
                raise
        return self._reverse_dict
    reverse_dict = property(_get_reverse_dict)

    def resolve(self, path):
        """Resolve *path*; raises Resolver404 (listing tried patterns) on failure."""
        tried = []
        match = self.regex.search(path)
        if match:
            new_path = path[match.end():]
            for pattern in self.urlconf_module.urlpatterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404, e:
                    tried.extend([(pattern.regex.pattern + '   ' + t) for t in e.args[0]['tried']])
                else:
                    if sub_match:
                        # Merge outer named groups, then defaults, then the
                        # sub-match kwargs (innermost values win).
                        sub_match_dict = dict([(smart_str(k), v) for k, v in match.groupdict().items()])
                        sub_match_dict.update(self.default_kwargs)
                        for k, v in sub_match[2].iteritems():
                            sub_match_dict[smart_str(k)] = v
                        return sub_match[0], sub_match[1], sub_match_dict
                    tried.append(pattern.regex.pattern)
            raise Resolver404, {'tried': tried, 'path': new_path}
예제 #22
0
    def _populate(self):
        """Fill the per-language reverse/namespace/app lookup tables.

        Also records a dotted-path lookup string for every view callback
        so resolver checks can report them.
        """
        lookups = MultiValueDict()
        namespaces = {}
        apps = {}
        language_code = get_language()
        for pattern in reversed(self.url_patterns):
            # Record the callback's dotted path, however it was given
            # (pre-resolved string, function, class, or functools.partial).
            if hasattr(pattern, '_callback_str'):
                self._callback_strs.add(pattern._callback_str)
            elif hasattr(pattern, '_callback'):
                callback = pattern._callback
                if isinstance(callback, functools.partial):
                    callback = callback.func

                if not hasattr(callback, '__name__'):
                    lookup_str = callback.__module__ + "." + callback.__class__.__name__
                else:
                    lookup_str = callback.__module__ + "." + callback.__name__
                self._callback_strs.add(lookup_str)
            p_pattern = pattern.regex.pattern
            # Strip the leading anchor so prefixes concatenate cleanly.
            if p_pattern.startswith('^'):
                p_pattern = p_pattern[1:]
            if isinstance(pattern, RegexURLResolver):
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)
                    if pattern.app_name:
                        apps.setdefault(pattern.app_name,
                                        []).append(pattern.namespace)
                else:
                    # Anonymous include: flatten its reverse entries into
                    # this resolver, prefixed and with defaults merged.
                    parent_pat = pattern.regex.pattern
                    for name in pattern.reverse_dict:
                        for matches, pat, defaults in pattern.reverse_dict.getlist(
                                name):
                            new_matches = normalize(parent_pat + pat)
                            lookups.appendlist(
                                name,
                                (new_matches, p_pattern + pat,
                                 dict(defaults, **pattern.default_kwargs)))
                    for namespace, (
                            prefix,
                            sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix,
                                                 sub_pattern)
                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)
                    self._callback_strs.update(pattern._callback_strs)
            else:
                # Plain URL pattern: reversible by callback and by name.
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback,
                                   (bits, p_pattern, pattern.default_args))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name,
                                       (bits, p_pattern, pattern.default_args))
        self._reverse_dict[language_code] = lookups
        self._namespace_dict[language_code] = namespaces
        self._app_dict[language_code] = apps
        self._populated = True
예제 #23
0
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Render the 'command' field as a <select> of Kitsune checks grouped by app."""
        request = kwargs.pop("request", None)

        if db_field.name == 'command':
            # Group the available checks by the app that provides them.
            checks_by_app = MultiValueDict()
            for command, app in get_kitsune_checks():
                checks_by_app.appendlist(app, command)

            # Build grouped choices: [app, [[command, command], ...]],
            # commands sorted alphabetically within each app.
            choices = []
            for app in checks_by_app.keys():
                app_commands = sorted(checks_by_app.getlist(app))
                choices.append([app, [[c, c] for c in app_commands]])

            kwargs['widget'] = forms.widgets.Select(choices=choices)
            return db_field.formfield(**kwargs)
        kwargs['request'] = request
        return super(JobAdmin, self).formfield_for_dbfield(db_field, **kwargs)
예제 #24
0
class RegexURLResolver(object):
    """Match a URL path against a module's ``urlpatterns`` and support
    reverse lookups from view callbacks / names back to URL patterns.

    NOTE(review): this snippet uses Python 2 syntax (``except X, e`` /
    ``raise X, value``) and will not parse under Python 3.
    """
    def __init__(self, regex, urlconf_name, default_kwargs=None):
        # regex is a string representing a regular expression.
        # urlconf_name is a string representing the module containing urlconfs.
        self.regex = re.compile(regex, re.UNICODE)
        self.urlconf_name = urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        # Lazily filled by _get_reverse_dict; maps callback/name ->
        # list of (matches, pattern) candidates.
        self._reverse_dict = MultiValueDict()

    def __repr__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self.urlconf_name, self.regex.pattern)

    def _get_reverse_dict(self):
        """Lazily build and cache the reverse-lookup table.

        ``self.urlconf_module`` is presumably a property defined elsewhere
        that imports ``self.urlconf_name`` — TODO confirm.
        """
        if not self._reverse_dict and hasattr(self.urlconf_module, 'urlpatterns'):
            for pattern in reversed(self.urlconf_module.urlpatterns):
                p_pattern = pattern.regex.pattern
                if p_pattern.startswith('^'):
                    # Strip the anchor so entries can be prefixed/joined.
                    p_pattern = p_pattern[1:]
                if isinstance(pattern, RegexURLResolver):
                    # Nested resolver: merge its entries, prefixing each
                    # candidate with this resolver's own pattern pieces.
                    parent = normalize(pattern.regex.pattern)
                    for name in pattern.reverse_dict:
                        for matches, pat in pattern.reverse_dict.getlist(name):
                            new_matches = []
                            for piece, p_args in parent:
                                new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                            self._reverse_dict.appendlist(name, (new_matches, p_pattern + pat))
                else:
                    # Leaf pattern: register under both callback and name.
                    bits = normalize(p_pattern)
                    self._reverse_dict.appendlist(pattern.callback, (bits, p_pattern))
                    self._reverse_dict.appendlist(pattern.name, (bits, p_pattern))
        return self._reverse_dict
    reverse_dict = property(_get_reverse_dict)

    def resolve(self, path):
        """Resolve ``path`` to ``(callback, args, kwargs)``.

        Raises ``Resolver404`` with the list of tried patterns when no
        pattern matches.
        """
        tried = []
        match = self.regex.search(path)
        if match:
            # Only the part of the path after this resolver's own prefix
            # is handed to the child patterns.
            new_path = path[match.end():]
            for pattern in self.urlconf_module.urlpatterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404, e:
                    # Collect the child's tried patterns for 404 debugging.
                    tried.extend([(pattern.regex.pattern + '   ' + t) for t in e.args[0]['tried']])
                else:
                    if sub_match:
                        # Merge this resolver's named groups, its default
                        # kwargs, and the sub-match's kwargs (in that
                        # precedence order, later wins).
                        sub_match_dict = dict([(smart_str(k), v) for k, v in match.groupdict().items()])
                        sub_match_dict.update(self.default_kwargs)
                        for k, v in sub_match[2].iteritems():
                            sub_match_dict[smart_str(k)] = v
                        return sub_match[0], sub_match[1], sub_match_dict
                    tried.append(pattern.regex.pattern)
            raise Resolver404, {'tried': tried, 'path': new_path}
예제 #25
0
 def _populate(self):
     """Build per-language reverse/namespace/app lookup tables.

     Guarded by ``self._populating`` so that a re-entrant call (a nested
     resolver populating back into this one) short-circuits instead of
     recursing forever. Tables are stored keyed by the active language.
     """
     if self._populating:
         return
     self._populating = True
     lookups = MultiValueDict()  # url name / callback -> candidate patterns
     namespaces = {}
     apps = {}
     language_code = get_language()
     for pattern in reversed(self.url_patterns):
         if isinstance(pattern, RegexURLPattern):
             self._callback_strs.add(pattern.lookup_str)
         p_pattern = pattern.regex.pattern
         if p_pattern.startswith('^'):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name,
                                     []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its reverse
                 # entries into our tables, prefixed with our pattern.
                 parent_pat = pattern.regex.pattern
                 for name in pattern.reverse_dict:
                     for matches, pat, defaults in pattern.reverse_dict.getlist(
                             name):
                         new_matches = normalize(parent_pat + pat)
                         lookups.appendlist(name, (
                             new_matches,
                             p_pattern + pat,
                             dict(defaults, **pattern.default_kwargs),
                         ))
                 for namespace, (
                         prefix,
                         sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix,
                                              sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
             if not pattern._populating:
                 pattern._populate()
             self._callback_strs.update(pattern._callback_strs)
         else:
             # Leaf pattern: register by callback and (optionally) name.
             bits = normalize(p_pattern)
             lookups.appendlist(pattern.callback,
                                (bits, p_pattern, pattern.default_args))
             if pattern.name is not None:
                 lookups.appendlist(pattern.name,
                                    (bits, p_pattern, pattern.default_args))
     self._reverse_dict[language_code] = lookups
     self._namespace_dict[language_code] = namespaces
     self._app_dict[language_code] = apps
     self._populated = True
     self._populating = False
예제 #26
0
파일: resolvers.py 프로젝트: Runur/django-1
 def _populate(self):
     """Build per-language reverse/namespace/app lookup tables.

     Unlike the simple-flag variant, the re-entrancy guard lives on a
     thread-local so concurrent threads can still populate in parallel.
     """
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     if getattr(self._local, 'populating', False):
         return
     self._local.populating = True
     lookups = MultiValueDict()  # url name / callback -> candidate patterns
     namespaces = {}
     apps = {}
     language_code = get_language()
     for pattern in reversed(self.url_patterns):
         if isinstance(pattern, RegexURLPattern):
             self._callback_strs.add(pattern.lookup_str)
         p_pattern = pattern.regex.pattern
         if p_pattern.startswith('^'):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name, []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its reverse
                 # entries into our tables, prefixed with our pattern.
                 parent_pat = pattern.regex.pattern
                 for name in pattern.reverse_dict:
                     for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                         new_matches = normalize(parent_pat + pat)
                         lookups.appendlist(
                             name,
                             (
                                 new_matches,
                                 p_pattern + pat,
                                 dict(defaults, **pattern.default_kwargs),
                             )
                         )
                 for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
             if not getattr(pattern._local, 'populating', False):
                 pattern._populate()
             self._callback_strs.update(pattern._callback_strs)
         else:
             # Leaf pattern: register by callback and (optionally) name.
             bits = normalize(p_pattern)
             lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
             if pattern.name is not None:
                 lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
     self._reverse_dict[language_code] = lookups
     self._namespace_dict[language_code] = namespaces
     self._app_dict[language_code] = apps
     self._populated = True
     self._local.populating = False
예제 #27
0
    def _populate(self):
        """Build per-language reverse/namespace/app lookup tables.

        This variant also records a dotted-path string for each pattern's
        callback (unwrapping ``functools.partial``) into
        ``self._callback_strs``. Note there is no re-entrancy guard here.
        """
        lookups = MultiValueDict()  # url name / callback -> candidate patterns
        namespaces = {}
        apps = {}
        language_code = get_language()
        for pattern in reversed(self.url_patterns):
            if hasattr(pattern, '_callback_str'):
                self._callback_strs.add(pattern._callback_str)
            elif hasattr(pattern, '_callback'):
                callback = pattern._callback
                if isinstance(callback, functools.partial):
                    # Record the wrapped function, not the partial object.
                    callback = callback.func

                if not hasattr(callback, '__name__'):
                    # Callable instance: fall back to its class name.
                    lookup_str = callback.__module__ + "." + callback.__class__.__name__
                else:
                    lookup_str = callback.__module__ + "." + callback.__name__
                self._callback_strs.add(lookup_str)
            p_pattern = pattern.regex.pattern
            if p_pattern.startswith('^'):
                # Drop the anchor so patterns can be joined into prefixes.
                p_pattern = p_pattern[1:]
            if isinstance(pattern, RegexURLResolver):
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)
                    if pattern.app_name:
                        apps.setdefault(pattern.app_name, []).append(pattern.namespace)
                else:
                    # Un-namespaced nested resolver: merge its reverse
                    # entries into our tables, prefixed with our pattern.
                    parent_pat = pattern.regex.pattern
                    for name in pattern.reverse_dict:
                        for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                            new_matches = normalize(parent_pat + pat)
                            lookups.appendlist(
                                name,
                                (
                                    new_matches,
                                    p_pattern + pat,
                                    dict(defaults, **pattern.default_kwargs),
                                )
                            )
                    for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)
                    self._callback_strs.update(pattern._callback_strs)
            else:
                # Leaf pattern: register by callback and (optionally) name.
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
        self._reverse_dict[language_code] = lookups
        self._namespace_dict[language_code] = namespaces
        self._app_dict[language_code] = apps
        self._populated = True
예제 #28
0
 def _populate(self):
     """Build per-language reverse/namespace/app lookup tables.

     Re-entrancy is guarded by a plain ``self._populating`` flag, so a
     nested resolver that populates back into this one returns early.
     """
     if self._populating:
         return
     self._populating = True
     lookups = MultiValueDict()  # url name / callback -> candidate patterns
     namespaces = {}
     apps = {}
     language_code = get_language()
     for pattern in reversed(self.url_patterns):
         if isinstance(pattern, RegexURLPattern):
             self._callback_strs.add(pattern.lookup_str)
         p_pattern = pattern.regex.pattern
         if p_pattern.startswith('^'):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name, []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its reverse
                 # entries into our tables, prefixed with our pattern.
                 parent_pat = pattern.regex.pattern
                 for name in pattern.reverse_dict:
                     for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                         new_matches = normalize(parent_pat + pat)
                         lookups.appendlist(
                             name,
                             (
                                 new_matches,
                                 p_pattern + pat,
                                 dict(defaults, **pattern.default_kwargs),
                             )
                         )
                 for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
             if not pattern._populating:
                 pattern._populate()
             self._callback_strs.update(pattern._callback_strs)
         else:
             # Leaf pattern: register by callback and (optionally) name.
             bits = normalize(p_pattern)
             lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
             if pattern.name is not None:
                 lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
     self._reverse_dict[language_code] = lookups
     self._namespace_dict[language_code] = namespaces
     self._app_dict[language_code] = apps
     self._populated = True
     self._populating = False
예제 #29
0
    def _populate(self):
        """Build per-language reverse/namespace/app lookup tables.

        (Comments translated from Chinese.) Note this variant has no
        re-entrancy guard and does not set a ``_populated`` flag.
        """
        lookups = MultiValueDict() # maps url name / callback -> list of candidates
        namespaces = {}
        apps = {}
        language_code = get_language()

        for pattern in reversed(self.url_patterns): # url_patterns is loaded from the urlconf module

            # pattern may be a plain RegexURLPattern or a nested RegexURLResolver
            p_pattern = pattern.regex.pattern

            if p_pattern.startswith('^'):
                p_pattern = p_pattern[1:]

            if isinstance(pattern, RegexURLResolver): # nested resolver (an include())
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)

                    if pattern.app_name:                    # may be unset/empty
                        apps.setdefault(pattern.app_name, [] ).append(pattern.namespace)

                else:  # nested resolver without a namespace: merge its entries
                    parent = normalize(pattern.regex.pattern)

                    for name in pattern.reverse_dict:

                        for matches, pat, defaults in pattern.reverse_dict.getlist(name):

                            new_matches = []

                            # Prefix every candidate with this resolver's pieces.
                            for piece, p_args in parent:
                                new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                            lookups.appendlist(name, (new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs)))

                    for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix, sub_pattern)

                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)

            else:
                # Leaf pattern: register by callback and (optionally) name.
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))

        self._reverse_dict[language_code] = lookups
        self._namespace_dict[language_code] = namespaces
        self._app_dict[language_code] = apps
예제 #30
0
 def _populate(self):
     """Build per-language reverse/namespace/app lookup tables (thread-local
     re-entrancy guard variant)."""
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     if getattr(self._local, "populating", False):
         return
     self._local.populating = True
     lookups = MultiValueDict()  # url name / callback -> candidate patterns
     namespaces = {}
     apps = {}
     language_code = get_language()
     for pattern in reversed(self.url_patterns):
         if isinstance(pattern, RegexURLPattern):
             self._callback_strs.add(pattern.lookup_str)
         p_pattern = pattern.regex.pattern
         if p_pattern.startswith("^"):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name, []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its reverse
                 # entries into our tables, prefixed with our pattern.
                 parent_pat = pattern.regex.pattern
                 for name in pattern.reverse_dict:
                     for matches, pat, defaults in pattern.reverse_dict.getlist(name):
                         new_matches = normalize(parent_pat + pat)
                         lookups.appendlist(
                             name, (new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs))
                         )
                 for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
             if not getattr(pattern._local, "populating", False):
                 pattern._populate()
             self._callback_strs.update(pattern._callback_strs)
         else:
             # Leaf pattern: register by callback and (optionally) name.
             bits = normalize(p_pattern)
             lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
             if pattern.name is not None:
                 lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
     self._reverse_dict[language_code] = lookups
     self._namespace_dict[language_code] = namespaces
     self._app_dict[language_code] = apps
     self._populated = True
     self._local.populating = False
예제 #31
0
 def _populate(self):
     """Build the reverse/namespace/app lookup tables.

     Older, non-language-aware variant: the tables are assigned directly
     to ``self._reverse_dict`` etc. instead of being keyed by language.
     """
     lookups = MultiValueDict()  # url name / callback -> candidate patterns
     namespaces = {}
     apps = {}
     for pattern in reversed(self.url_patterns):
         p_pattern = pattern.regex.pattern
         if p_pattern.startswith('^'):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name,
                                     []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its reverse
                 # entries, prefixing each candidate with our pieces.
                 parent = normalize(pattern.regex.pattern)
                 for name in pattern.reverse_dict:
                     for matches, pat, defaults in pattern.reverse_dict.getlist(
                             name):
                         new_matches = []
                         for piece, p_args in parent:
                             new_matches.extend([
                                 (piece + suffix, p_args + args)
                                 for (suffix, args) in matches
                             ])
                         lookups.appendlist(
                             name,
                             (new_matches, p_pattern + pat,
                              dict(defaults, **pattern.default_kwargs)))
                 for namespace, (
                         prefix,
                         sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix,
                                              sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
         else:
             # Leaf pattern: register by callback and (optionally) name.
             bits = normalize(p_pattern)
             lookups.appendlist(pattern.callback,
                                (bits, p_pattern, pattern.default_args))
             if pattern.name is not None:
                 lookups.appendlist(pattern.name,
                                    (bits, p_pattern, pattern.default_args))
     self._reverse_dict = lookups
     self._namespace_dict = namespaces
     self._app_dict = apps
예제 #32
0
    def _populate(self):
        """Build reverse/namespace/app tables keyed by the active language.

        Results go into ``self._i18n_*`` dicts rather than the stock
        attributes (see the note below).
        """
        # Almost the same as the original `_populate` function, except the last
        # 4 lines of code.
        lookups = MultiValueDict()  # url name / callback -> candidate patterns
        namespaces = {}
        apps = {}
        language_code = get_language()
        for pattern in reversed(self.url_patterns):
            p_pattern = pattern.regex.pattern
            if p_pattern.startswith('^'):
                # Drop the anchor so patterns can be joined into prefixes.
                p_pattern = p_pattern[1:]
            if isinstance(pattern, RegexURLResolver):
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)
                    if pattern.app_name:
                        apps.setdefault(pattern.app_name,
                                        []).append(pattern.namespace)
                else:
                    # Un-namespaced nested resolver: merge its reverse
                    # entries, prefixing each candidate with our pieces.
                    parent = normalize(pattern.regex.pattern)
                    for name in pattern.reverse_dict:
                        for matches, pat in pattern.reverse_dict.getlist(name):
                            new_matches = []
                            for piece, p_args in parent:
                                new_matches.extend([
                                    (piece + suffix, p_args + args)
                                    for (suffix, args) in matches
                                ])
                            lookups.appendlist(name,
                                               (new_matches, p_pattern + pat))
                    for namespace, (
                            prefix,
                            sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix,
                                                 sub_pattern)
                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)
            else:
                # Leaf pattern: register by callback and (optionally) name.
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback, (bits, p_pattern))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name, (bits, p_pattern))

        self._i18n_reverse_dict[language_code] = lookups
        self._i18n_namespace_dict[language_code] = namespaces
        self._i18n_app_dict[language_code] = apps
예제 #33
0
 def _build_reverse_dict_for_lang(self, lang):
     """Build and return the reverse-lookup table for ``lang``.

     Patterns/resolvers that expose ``get_regex``/``get_reverse_dict``
     are asked for their language-specific variants; plain ones fall
     back to ``regex``/``reverse_dict``. Also refreshes
     ``self._namespace_dict`` and ``self._app_dict`` as a side effect.
     """
     reverse_dict = MultiValueDict()  # url name / callback -> candidates
     namespaces = {}
     apps = {}
     for pattern in reversed(self.url_patterns):
         if hasattr(pattern, 'get_regex'):
             p_pattern = pattern.get_regex(lang).pattern
         else:
             p_pattern = pattern.regex.pattern
         if p_pattern.startswith('^'):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name, []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its (possibly
                 # language-specific) entries, prefixed with our pieces.
                 if hasattr(pattern, 'get_regex'):
                     parent = normalize(pattern.get_regex(lang).pattern)
                 else:
                     parent = normalize(pattern.regex.pattern)
                 if hasattr(pattern, 'get_reverse_dict'):
                     sub_reverse_dict = pattern.get_reverse_dict(lang)
                 else:
                     sub_reverse_dict = pattern.reverse_dict
                 for name in sub_reverse_dict:
                     for matches, pat in sub_reverse_dict.getlist(name):
                         new_matches = []
                         for piece, p_args in parent:
                             new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                         reverse_dict.appendlist(name, (new_matches, p_pattern + pat))
                 for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
         else:
             # Leaf pattern: register under both callback and name.
             bits = normalize(p_pattern)
             reverse_dict.appendlist(pattern.callback, (bits, p_pattern))
             reverse_dict.appendlist(pattern.name, (bits, p_pattern))
     self._namespace_dict = namespaces
     self._app_dict = apps
     return reverse_dict
예제 #34
0
 def _build_reverse_dict_for_lang(self, lang):
     """Build and return the reverse-lookup table for ``lang``.

     Patterns/resolvers that expose ``get_regex``/``get_reverse_dict``
     are asked for their language-specific variants; plain ones fall
     back to ``regex``/``reverse_dict``. Also refreshes
     ``self._namespace_dict`` and ``self._app_dict`` as a side effect.
     """
     reverse_dict = MultiValueDict()  # url name / callback -> candidates
     namespaces = {}
     apps = {}
     for pattern in reversed(self.url_patterns):
         if hasattr(pattern, 'get_regex'):
             p_pattern = pattern.get_regex(lang).pattern
         else:
             p_pattern = pattern.regex.pattern
         if p_pattern.startswith('^'):
             # Drop the anchor so patterns can be joined into prefixes.
             p_pattern = p_pattern[1:]
         if isinstance(pattern, RegexURLResolver):
             if pattern.namespace:
                 namespaces[pattern.namespace] = (p_pattern, pattern)
                 if pattern.app_name:
                     apps.setdefault(pattern.app_name, []).append(pattern.namespace)
             else:
                 # Un-namespaced nested resolver: merge its (possibly
                 # language-specific) entries, prefixed with our pieces.
                 if hasattr(pattern, 'get_regex'):
                     parent = normalize(pattern.get_regex(lang).pattern)
                 else:
                     parent = normalize(pattern.regex.pattern)
                 if hasattr(pattern, 'get_reverse_dict'):
                     sub_reverse_dict = pattern.get_reverse_dict(lang)
                 else:
                     sub_reverse_dict = pattern.reverse_dict
                 for name in sub_reverse_dict:
                     for matches, pat in sub_reverse_dict.getlist(name):
                         new_matches = []
                         for piece, p_args in parent:
                             new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                         reverse_dict.appendlist(name, (new_matches, p_pattern + pat))
                 for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                     namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                 for app_name, namespace_list in pattern.app_dict.items():
                     apps.setdefault(app_name, []).extend(namespace_list)
         else:
             # Leaf pattern: register under both callback and name.
             bits = normalize(p_pattern)
             reverse_dict.appendlist(pattern.callback, (bits, p_pattern))
             reverse_dict.appendlist(pattern.name, (bits, p_pattern))
     self._namespace_dict = namespaces
     self._app_dict = apps
     return reverse_dict
예제 #35
0
 def _get_reverse_dict(self):
     """Lazily build and cache the reverse-lookup MultiValueDict.

     Maps each view callback / url name to ``(matches, pattern)``
     candidates; nested resolvers contribute their entries prefixed
     with this resolver's own pattern pieces.
     """
     if self._reverse_dict:
         return self._reverse_dict
     table = MultiValueDict()
     for entry in reversed(self.url_patterns):
         raw = entry.regex.pattern
         trimmed = raw[1:] if raw.startswith('^') else raw
         if isinstance(entry, RegexURLResolver):
             parent_bits = normalize(raw)
             for key in entry.reverse_dict:
                 for matches, pat in entry.reverse_dict.getlist(key):
                     combined = [
                         (piece + suffix, p_args + args)
                         for piece, p_args in parent_bits
                         for (suffix, args) in matches
                     ]
                     table.appendlist(key, (combined, trimmed + pat))
         else:
             bits = normalize(trimmed)
             table.appendlist(entry.callback, (bits, trimmed))
             table.appendlist(entry.name, (bits, trimmed))
     self._reverse_dict = table
     return self._reverse_dict
예제 #36
0
 def _get_reverse_dict(self):
     """Lazily build and cache the reverse-lookup MultiValueDict.

     Only builds when the table is empty AND the urlconf module actually
     defines ``urlpatterns``; otherwise returns whatever is cached.
     """
     if self._reverse_dict or not hasattr(self.urlconf_module, 'urlpatterns'):
         return self._reverse_dict
     table = MultiValueDict()
     for entry in reversed(self.urlconf_module.urlpatterns):
         raw = entry.regex.pattern
         trimmed = raw[1:] if raw.startswith('^') else raw
         if isinstance(entry, RegexURLResolver):
             parent_bits = normalize(raw)
             for key in entry.reverse_dict:
                 for matches, pat in entry.reverse_dict.getlist(key):
                     combined = [
                         (piece + suffix, p_args + args)
                         for piece, p_args in parent_bits
                         for (suffix, args) in matches
                     ]
                     table.appendlist(key, (combined, trimmed + pat))
         else:
             bits = normalize(trimmed)
             table.appendlist(entry.callback, (bits, trimmed))
             table.appendlist(entry.name, (bits, trimmed))
     self._reverse_dict = table
     return self._reverse_dict
예제 #37
0
def lxml_form_data(form):
    """Collect submittable values from an lxml form element.

    Returns a MultiValueDict mapping field names to the values a browser
    would submit: text/hidden inputs and textareas, checked checkboxes
    and radios, and the selected options of every select.
    """
    data = MultiValueDict()
    text_fields = form.cssselect('input[type=text],'
                                 'input[type=hidden],'
                                 'textarea')
    for field in text_fields:
        data.appendlist(field.name, field.value or '')
    checked_fields = form.cssselect('input[type=checkbox][checked],'
                                    'input[type=radio][checked]')
    for field in checked_fields:
        data.appendlist(field.name, field.value or '')
    for select in form.cssselect('select'):
        for option in select.cssselect('option[selected]'):
            data.appendlist(select.name, option.get('value'))
    return data
예제 #38
0
 def appendlist(self, key, value):
     """Append ``value`` to the list stored under ``key``.

     Refuses to run on an immutable instance, then coerces both key and
     value to unicode using ``self.encoding`` before delegating to the
     base ``MultiValueDict.appendlist``.
     """
     self._assert_mutable()
     key = str_to_unicode(key, self.encoding)
     value = str_to_unicode(value, self.encoding)
     MultiValueDict.appendlist(self, key, value)
예제 #39
0
 def _populate(self):
     """Build reverse/namespace/app lookup tables keyed by path prefix.

     Same shape as Django's resolver ``_populate``, but tables are keyed
     by ``self._get_path_prefix()`` instead of the active language, and
     the thread-local guard is cleared in a ``finally`` so an exception
     during population cannot leave this thread permanently locked out.
     """
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     #pylint:disable=protected-access,too-many-locals
     if getattr(self._local, 'populating', False):
         return
     try:
         self._local.populating = True
         lookups = MultiValueDict()  # url name / callback -> candidates
         namespaces = {}
         apps = {}
         path_prefix = self._get_path_prefix()
         for url_pattern in reversed(self.url_patterns):
             if isinstance(url_pattern, DjangoRegexURLPattern):
                 self._callback_strs.add(url_pattern.lookup_str)
             # could be RegexURLPattern.regex or RegexURLResolver.regex here.
             p_pattern = url_pattern.regex.pattern
             if p_pattern.startswith('^'):
                 # Drop the anchor so patterns can be joined into prefixes.
                 p_pattern = p_pattern[1:]
             if isinstance(url_pattern, DjangoRegexURLResolver):
                 if url_pattern.namespace:
                     namespaces[url_pattern.namespace] = (p_pattern,
                                                          url_pattern)
                     if url_pattern.app_name:
                         apps.setdefault(url_pattern.app_name,
                                         []).append(url_pattern.namespace)
                 else:
                     # Un-namespaced nested resolver: merge its reverse
                     # entries, prefixed with our pattern.
                     parent_pat = url_pattern.regex.pattern
                     for name in url_pattern.reverse_dict:
                         for _, pat, defaults \
                             in url_pattern.reverse_dict.getlist(name):
                             new_matches = normalize(parent_pat + pat)
                             lookups.appendlist(name, (
                                 new_matches,
                                 p_pattern + pat,
                                 dict(defaults, **
                                      url_pattern.default_kwargs),
                             ))
                     for namespace, (prefix, sub_pattern) \
                         in url_pattern.namespace_dict.items():
                         namespaces[namespace] = (p_pattern + prefix,
                                                  sub_pattern)
                     for app_name, namespace_list in \
                             url_pattern.app_dict.items():
                         apps.setdefault(app_name,
                                         []).extend(namespace_list)
                 if not getattr(url_pattern._local, 'populating', False):
                     url_pattern._populate()
                 self._callback_strs.update(url_pattern._callback_strs)
             else:
                 # Leaf pattern: register by callback and (optionally) name.
                 bits = normalize(p_pattern)
                 lookups.appendlist(
                     url_pattern.callback,
                     (bits, p_pattern, url_pattern.default_args))
                 if url_pattern.name is not None:
                     lookups.appendlist(
                         url_pattern.name,
                         (bits, p_pattern, url_pattern.default_args))
         self._reverse_dict[path_prefix] = lookups
         self._namespace_dict[path_prefix] = namespaces
         self._app_dict[path_prefix] = apps
         self._populated = True
     finally:
         self._local.populating = False
예제 #40
0
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handler:
            An UploadHandler instance that performs operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.

        Raises ``MultiPartParserError`` when the Content-Type is not
        multipart, the boundary is invalid, or the content length is negative.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary that separates the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        if isinstance(boundary, six.text_type):
            # The boundary is compared against raw bytes later on.
            boundary = boundary.encode('ascii')

        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2**31-4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if the handler will want to take care of the parsing.
        # This allows overriding everything if somebody wants it.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    # This is where self._files gets updated.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()

                # Normalize the field name to text in the request encoding.
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()

                        try:
                            data = str(raw_data).decode('base64')
                        except Exception:
                            # Not valid base64; fall back to the raw bytes.
                            data = raw_data

                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))

                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')

                    if not file_name:
                        continue

                    file_name = force_text(file_name, encoding, errors='replace')
                    # IE submits the full client-side path; keep the basename.
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type', ('',))[0].strip()
                    try:
                        charset = meta_data.get('content-type', (0, {}))[1].get('charset', None)
                    except Exception:
                        charset = None

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always read base64 streams by multiple of 4
                                over_bytes = len(chunk) % 4
                                if over_bytes:
                                    over_chunk = field_stream.read(4 - over_bytes)
                                    chunk += over_chunk

                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError("Could not decode base64 data: %r" % e)

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        # Just use up the rest of this file...
                        exhaust(field_stream)

                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        # Return the POST data and the FILES dict.
        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signalling that takes place when a file is complete.

        This is where ``self._files`` gets updated.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(force_text(old_field_name,
                                                     self._encoding,
                                                     errors='replace'),
                                       file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        # Only the basename is wanted, not the client-side directory part.
        return filename and filename[filename.rfind("\\")+1:].strip()
예제 #41
0
def prepare_base_review_request_mail(user,
                                     review_request,
                                     subject,
                                     in_reply_to,
                                     to_field,
                                     cc_field,
                                     template_name_base,
                                     context=None,
                                     extra_headers=None):
    """Return a customized review request e-mail.

    This is intended to be called by one of the ``prepare_{type}_mail``
    functions in this file. This method builds up a common context that all
    review request-related e-mails will use to render their templates, as well
    as handling user preferences regarding e-mail and adding additional
    headers.

    Args:
        user (django.contrib.auth.models.User):
            The user who is sending the e-mail.

        review_request (reviewboard.reviews.models.review_request.ReviewRequest):
            The review request this e-mail is regarding.

        subject (unicode):
            The e-mail subject line.

        in_reply_to (unicode):
            The e-mail message ID this message is in response to or ``None``.

        to_field (set):
            The set of :py:class:`~django.contrib.auth.models.User` and
            :py:class`~reviewboard.reviews.models.group.Group`s to this e-mail
            will be sent to.

        cc_field (set):
            The set of :py:class:`~django.contrib.auth.models.User` and
            :py:class`~reviewboard.reviews.models.group.Group`s to be CC'ed on
            the e-mail.

        template_name_base (unicode):
            The name of the template to use to generate the e-mail without its
            extension. The plain-text version of the e-mail will append
            ``.txt`` to this and the rich-text version of the e-mail will
            append ``.html``.

        context (dict, optional):
            Optional additional template rendering context.

        extra_headers (dict, optional):
            Optional additional headers to include.

    Returns:
        EmailMessage:
        The prepared e-mail message, or ``None`` if there would be no
        recipients.
    """
    # Resolve User/Group recipients into concrete e-mail addresses. Any
    # address already in To is removed from CC to avoid duplicate delivery.
    user_email = build_email_address_for_user(user)
    to_field = recipients_to_addresses(to_field, review_request.id)
    cc_field = recipients_to_addresses(cc_field, review_request.id) - to_field

    if not user.should_send_own_updates():
        # Honor the user's preference not to receive their own updates.
        to_field.discard(user_email)
        cc_field.discard(user_email)

    if not to_field and not cc_field:
        # This e-mail would have no recipients, so we won't send it.
        return None

    if not context:
        context = {}

    context.update({
        'user': user,
        'site_url': get_server_url(),
        'review_request': review_request,
    })
    local_site = review_request.local_site

    if local_site:
        context['local_site_name'] = local_site.name

    # Render both the plain-text and HTML bodies from the same context.
    text_body = render_to_string('%s.txt' % template_name_base, context)
    html_body = render_to_string('%s.html' % template_name_base, context)
    server_url = get_server_url(local_site=local_site)

    headers = MultiValueDict({
        'X-ReviewBoard-URL': [server_url],
        'X-ReviewRequest-URL': [
            build_server_url(review_request.get_absolute_url(),
                             local_site=local_site)
        ],
        'X-ReviewGroup': [
            ', '.join(
                review_request.target_groups.values_list('name', flat=True))
        ],
    })

    if extra_headers:
        if not isinstance(extra_headers, MultiValueDict):
            # Normalize plain dicts so update() merges header lists correctly.
            extra_headers = MultiValueDict(
                (key, [value]) for key, value in six.iteritems(extra_headers))

        headers.update(extra_headers)

    if review_request.repository:
        headers['X-ReviewRequest-Repository'] = review_request.repository.name

    latest_diffset = review_request.get_latest_diffset()

    if latest_diffset:
        # Collect the source/destination names of every file touched by the
        # latest diff so clients can filter mail by modified file.
        modified_files = set()

        for filediff in latest_diffset.files.all():
            if filediff.deleted or filediff.copied or filediff.moved:
                modified_files.add(filediff.source_file)

            if filediff.is_new or filediff.copied or filediff.moved:
                modified_files.add(filediff.dest_file)

        # The following code segment deals with the case where the client adds
        # a significant amount of files with large names. We limit the number
        # of headers; when more than 8192 characters are reached, we stop
        # adding filename headers.
        current_header_length = 0

        for filename in modified_files:
            current_header_length += (HEADER_ADDITIONAL_CHARACTERS_LENGTH +
                                      len(filename))

            if current_header_length > MAX_FILENAME_HEADERS_LENGTH:
                logging.warning(
                    'Unable to store all filenames in the '
                    'X-ReviewBoard-Diff-For headers when sending e-mail for '
                    'review request %s: The header size exceeds the limit of '
                    '%s. Remaining headers have been omitted.',
                    review_request.display_id, MAX_FILENAME_HEADERS_LENGTH)
                break

            headers.appendlist('X-ReviewBoard-Diff-For', filename)

    if settings.DEFAULT_FROM_EMAIL:
        # Use a trusted Sender address alongside the user's From address --
        # presumably for deliverability (e.g. DMARC); confirm with mail docs.
        sender = build_email_address(full_name=user.get_full_name(),
                                     email=settings.DEFAULT_FROM_EMAIL)
    else:
        sender = None

    return EmailMessage(subject=subject.strip(),
                        text_body=text_body.encode('utf-8'),
                        html_body=html_body.encode('utf-8'),
                        from_email=user_email,
                        sender=sender,
                        to=list(to_field),
                        cc=list(cc_field),
                        in_reply_to=in_reply_to,
                        headers=headers)
예제 #42
0
    def _handle_raw_input_without_file_stream(self,
                                              input_data,
                                              META,
                                              raw_content_length,
                                              boundary,
                                              encoding=None):
        """
        Replaces django.http.multipartparser.MultiPartParser.parse
        A rfc2388 multipart/form-data parser but replacing the file stream to the creation of empty files.
        Returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.

        Regular form fields are parsed normally (with the standard Django
        DATA_UPLOAD_MAX_NUMBER_FIELDS / DATA_UPLOAD_MAX_MEMORY_SIZE limits),
        but file parts are never read: only ``self.new_file`` and
        ``self.file_complete`` are invoked, so each upload ends up as an
        empty file object.

        NOTE(review): ``META`` is accepted to mirror the signature of
        ``MultiPartParser.parse``'s surroundings but is unused here.
        """
        # Create the data structures to be used later.
        _post = QueryDict(mutable=True)
        _files = MultiValueDict()

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        _chunk_size = min([2**31 - 4, self.chunk_size])

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(input_data, _chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        for item_type, meta_data, field_stream in Parser(stream, boundary):
            if old_field_name:
                # We run this at the beginning of the next loop
                # since we cannot be sure a file is complete until
                # we hit the next boundary/part of the multipart content.
                file_obj = self.file_complete(raw_content_length)
                if file_obj:
                    # If it returns a file object, then set the files dict.
                    _files.appendlist(
                        force_str(old_field_name, encoding, errors="replace"),
                        file_obj)
                old_field_name = None

            try:
                disposition = meta_data["content-disposition"][1]
                field_name = disposition["name"].strip()
            except (KeyError, IndexError, AttributeError):
                # Malformed part without a usable Content-Disposition; skip.
                continue

            transfer_encoding = meta_data.get("content-transfer-encoding")
            if transfer_encoding is not None:
                transfer_encoding = transfer_encoding[0].strip()
            field_name = force_str(field_name, encoding, errors="replace")

            if item_type == FIELD:
                # NOTE: Parse fields as usual, same as ``MultiPartParser.parse``
                # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                num_post_keys += 1
                if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                        and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                        num_post_keys):
                    raise TooManyFieldsSent(
                        "The number of GET/POST parameters exceeded "
                        "settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.")

                # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                    read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                # This is a post field, we can just set it in the post
                if transfer_encoding == "base64":
                    raw_data = field_stream.read(size=read_size)
                    num_bytes_read += len(raw_data)
                    try:
                        data = base64.b64decode(raw_data)
                    except binascii.Error:
                        # Not valid base64; keep the raw bytes.
                        data = raw_data
                else:
                    data = field_stream.read(size=read_size)
                    num_bytes_read += len(data)

                # Add two here to make the check consistent with the
                # x-www-form-urlencoded check that includes '&='.
                num_bytes_read += len(field_name) + 2
                if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
                        num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                    raise RequestDataTooBig(
                        "Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
                    )

                _post.appendlist(field_name,
                                 force_str(data, encoding, errors="replace"))
            elif item_type == FILE:
                # NOTE: Parse files WITHOUT a stream.
                # This is a file, use the handler...
                file_name = disposition.get("filename")
                if file_name:
                    file_name = force_str(file_name,
                                          encoding,
                                          errors="replace")
                    file_name = self.sanitize_file_name(file_name)
                if not file_name:
                    continue

                content_type, content_type_extra = meta_data.get(
                    "content-type", ("", {}))
                content_type = content_type.strip()
                charset = content_type_extra.get("charset")
                # Content length is intentionally not computed: the file
                # stream is never read.
                content_length = None

                self.new_file(field_name, file_name, content_type,
                              content_length, charset, content_type_extra)

                # Handle file upload completions on next iteration.
                old_field_name = field_name
            else:
                # If this is neither a FIELD or a FILE, just exhaust the stream.
                exhaust(stream)

        # Make sure that the request data is all fed
        exhaust(input_data)

        # Freeze the QueryDict, matching Django's parse() behavior.
        _post._mutable = False
        return _post, _files
예제 #43
0
class EmailBaseModel(models.Model):
    """Abstract base model storing a parsed incoming e-mail.

    The raw message headers and the Content-ID map are persisted as JSON
    text and lazily parsed on first access via the ``headers`` and
    ``content_ids`` properties.
    """
    sender = models.EmailField(_("sender"), max_length=255)
    from_str = models.CharField(_("from"), max_length=255)
    recipient = models.CharField(_("recipient"), max_length=255)
    subject = models.CharField(_("subject"), max_length=255, blank=True)
    body_plain = models.TextField(_("body plain"), blank=True)
    body_html = models.TextField(_("body html"), blank=True)
    stripped_text = models.TextField(_("stripped text"), blank=True)
    stripped_html = models.TextField(_("stripped html"), blank=True)
    stripped_signature = models.TextField(_("stripped signature"), blank=True)
    message_headers = models.TextField(_("message headers"), blank=True, help_text=_("Stored in JSON."))
    content_id_map = models.TextField(_("Content-ID map"), blank=True, help_text=_("Dictionary mapping Content-ID (CID) values to corresponding attachments. Stored in JSON."))
    received = models.DateTimeField(_("received"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _("incoming email")
        verbose_name_plural = _("incoming emails")

    def __init__(self, *args, **kwargs):
        super(EmailBaseModel, self).__init__(*args, **kwargs)
        # Lazily-populated caches for the JSON-encoded fields above.
        self._headers = None
        self._cids = None

    def _load_headers(self):
        """Parse ``message_headers`` (a JSON list of [key, value] pairs) into
        a MultiValueDict, preserving repeated header names."""
        self._headers = MultiValueDict()
        try:
            header_list = json.loads(self.message_headers)
            for key, val in header_list:
                self._headers.appendlist(key, val)
        except Exception:
            # Best-effort: a malformed payload leaves an empty MultiValueDict
            # rather than propagating. (Narrowed from a bare ``except:``.)
            logger.exception("Error parsing JSON data containing message headers")

    @property
    def headers(self):
        """Access message_headers parsed into MultiValueDict"""
        if self._headers is None:
            self._load_headers()
        return self._headers

    def _load_cids(self):
        """Parse ``content_id_map`` (a JSON object) into ``self._cids``."""
        if not self.content_id_map:
            # Nothing stored; avoid a guaranteed (and logged) JSON error.
            return
        self._cids = {}
        try:
            self._cids = json.loads(self.content_id_map)
        except Exception:
            # Narrowed from a bare ``except:``; keep best-effort semantics.
            logger.exception("Error parsing JSON data containing Content-IDs")

    @property
    def content_ids(self):
        """Access content_id_map as dict"""
        if not self.content_id_map:
            return
        if self._cids is None:
            self._load_cids()
        return self._cids

    @property
    def message_id(self):
        return self.headers.get('Message-Id', None)

    @property
    def cc(self):
        return self.headers.get('Cc', None)

    @property
    def references(self):
        return self.headers.get('References', None)

    @property
    def in_reply_to(self):
        return self.headers.get('In-Reply-To', None)

    @property
    def site_url(self):
        """Absolute URL of this message's detail page."""
        return 'https://%s%s' % (Site.objects.get_current().domain,
                                 reverse('comlink:mail', kwargs={'id': self.id}))

    def get_mailgun_data(self, stripped=True, footer=True):
        """Build the dict of Mailgun send parameters for this message.

        :stripped: use the signature-stripped bodies instead of the raw ones.
        :footer: append a "Sent through Nadine" footer with a link back.
        """
        if stripped:
            body_plain = self.stripped_text
            body_html = self.stripped_html
        else:
            body_plain = self.body_plain
            body_html = self.body_html

        if footer:
            # Add in a footer
            text_footer = "\n\n-------------------------------------------\n*~*~*~* Sent through Nadine *~*~*~*\n%s" % self.site_url
            body_plain = body_plain + text_footer
            if body_html:
                html_footer = "<br><br>-------------------------------------------<br>*~*~*~* Sent through Nadine *~*~*~*\n%s" % self.site_url
                body_html = body_html + html_footer

        # Build and return our data
        mailgun_data = {"from": self.from_str,
                        "to": [self.recipient, ],
                        "cc": [self.cc, ],
                        "subject": self.subject,
                        "text": body_plain,
                        "html": body_html,
                        }
        return mailgun_data

    def __str__(self):
        return _("Message from {from_str}: {subject_trunc}").format(
            from_str=self.from_str,
            subject_trunc=self.subject[:20])
예제 #44
0
 def appendlist(self, key, value):
     """Append ``value`` to the list stored under ``key``.

     The key and the value are both coerced to unicode with this dict's
     encoding before delegating to ``MultiValueDict.appendlist``. Raises
     if the dict is immutable.
     """
     self._assert_mutable()
     MultiValueDict.appendlist(self,
                               str_to_unicode(key, self.encoding),
                               str_to_unicode(value, self.encoding))
예제 #45
0
    def search(self,
               query_string,
               sort_by=None,
               start_offset=0,
               end_offset=None,
               fields='',
               highlight=False,
               facets=None,
               date_facets=None,
               query_facets=None,
               narrow_queries=None,
               spelling_query=None,
               limit_to_registered_models=None,
               debug=False,
               **kwargs):
        """Run a search over SearchableObject rows.

        Despite its name, ``query_string`` is iterated with ``iterlists()``
        and ``getlist()`` -- presumably a QueryDict-like multi-value mapping;
        confirm against callers. Returns a dict with ``results`` (a
        SearchObjectQuerySet) and ``hits`` (its length). Most keyword
        arguments exist for API compatibility and are currently unused.

        Note: this snippet is Python 2 code (``print`` statement below).
        """
        hits = 0
        results = list()

        if query_string in (None, ''):
            # Nothing to search for; return an empty result set.
            return {
                'results': results,
                'hits': len(results),
            }

        #build search criteria
        qs = SearchableObject.objects.all()
        extras = MultiValueDict()

        if narrow_queries:
            models = dict()
            for query in narrow_queries:
                #TODO properly parse narrow queries
                if ':' in query:
                    key, value = query.split(':', 1)
                    if key == 'django_ct':
                        # Restrict results by content type ("app_label.model").
                        if value not in models:
                            app_label, model = value.split('.', 1)
                            ct = ContentType.objects.get(app_label=app_label,
                                                         model=model)
                            models[value] = ct
                    else:
                        # NOTE(review): assigning into query_string (which is
                        # iterated below) looks suspicious -- confirm intent.
                        query_string[key] = value
            if models:
                limit_to_registered_models = models.values()

        if limit_to_registered_models is not None:
            qs = qs.filter(content_type__in=limit_to_registered_models)

        for key, value_list in query_string.iterlists():
            if key == 'content':
                # Full-text search across every 'content' value at once.
                search_params = ' '.join(query_string.getlist('content'))
                qs &= SearchableObject.objects.search(search_params)
            else:
                #TODO support multi value search
                # Each non-content key becomes an extra WHERE clause with
                # (key, first value) as parameters.
                extras.appendlist('where', self.get_subquery_sql())
                extras.appendlist('params', key)
                extras.appendlist('params', str(value_list[0]))

        if extras:
            qs = qs.extra(**extras)

        qs = qs.distinct()  #may or may not work....

        results = SearchObjectQuerySet(model=qs.model, query=qs.query)

        if debug:
            print len(results)

        return {
            'results': results,
            'hits': len(results),
        }
예제 #46
0
 def test_appendlist(self):
     """appendlist() accumulates values under one key in insertion order."""
     d = MultiValueDict()
     for value in ('Adrian', 'Simon'):
         d.appendlist('name', value)
     self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
예제 #47
0
 def test_appendlist(self):
     """Appending twice to the same key yields both values via getlist()."""
     d = MultiValueDict()
     d.appendlist('name', 'Adrian')
     d.appendlist('name', 'Simon')
     expected = ['Adrian', 'Simon']
     self.assertEqual(d.getlist('name'), expected)
예제 #48
0
 def test_appendlist(self):
     """appendlist() builds a multi-value entry for a single key."""
     d = MultiValueDict()
     names = ["Adrian", "Simon"]
     for name in names:
         d.appendlist("name", name)
     self.assertEqual(d.getlist("name"), names)
 def _populate(self):
     """Build this resolver's reverse-lookup, namespace and app dictionaries.

     Populates ``self._reverse_dict``, ``self._namespace_dict`` and
     ``self._app_dict`` (keyed by the current path prefix) by walking
     ``self.url_patterns`` in reverse, and records every view callback
     string in ``self._callback_strs``.
     """
     # Short-circuit if called recursively in this thread to prevent
     # infinite recursion. Concurrent threads may call this at the same
     # time and will need to continue, so set 'populating' on a
     # thread-local variable.
     if getattr(self._local, 'populating', False):
         return
     try:
         self._local.populating = True
         lookups = MultiValueDict()
         namespaces = {}
         apps = {}
         path_prefix = self._get_path_prefix()
         for url_pattern in reversed(self.url_patterns):
             p_pattern = url_pattern.pattern.regex.pattern
             if p_pattern.startswith('^'):
                 # Drop the anchor so patterns can be concatenated.
                 p_pattern = p_pattern[1:]
             if isinstance(url_pattern, URLPattern):
                 self._callback_strs.add(url_pattern.lookup_str)
                 bits = normalize(url_pattern.pattern.regex.pattern)
                 # Register the pattern under its callback, and under its
                 # name as well when one was given.
                 lookups.appendlist(
                     url_pattern.callback,
                     (bits, p_pattern, url_pattern.default_args,
                      url_pattern.pattern.converters))
                 if url_pattern.name is not None:
                     lookups.appendlist(
                         url_pattern.name,
                         (bits, p_pattern, url_pattern.default_args,
                          url_pattern.pattern.converters))
             else:  # url_pattern is a URLResolver.
                 url_pattern._populate()
                 if url_pattern.app_name:
                     apps.setdefault(url_pattern.app_name,
                                     []).append(url_pattern.namespace)
                     namespaces[url_pattern.namespace] = (p_pattern,
                                                          url_pattern)
                 else:
                     # No namespace: merge the nested resolver's entries
                     # into this one, prefixing patterns and combining
                     # defaults/converters.
                     for name in url_pattern.reverse_dict:
                         for matches, pat, defaults, converters in \
                                 url_pattern.reverse_dict.getlist(name):
                             new_matches = normalize(p_pattern + pat)
                             lookups.appendlist(
                                 name, (new_matches, p_pattern + pat, {
                                     **defaults,
                                     **url_pattern.default_kwargs
                                 }, {
                                     **self.pattern.converters,
                                     **url_pattern.pattern.converters,
                                     **converters
                                 }))
                     for namespace, (prefix, sub_pattern) in \
                             url_pattern.namespace_dict.items():
                         current_converters = \
                             url_pattern.pattern.converters
                         sub_pattern.pattern.converters.update(
                             current_converters)
                         namespaces[namespace] = (p_pattern + prefix,
                                                  sub_pattern)
                     for app_name, namespace_list in \
                             url_pattern.app_dict.items():
                         apps.setdefault(app_name,
                                         []).extend(namespace_list)
                 self._callback_strs.update(url_pattern._callback_strs)
         self._namespace_dict[path_prefix] = namespaces
         self._app_dict[path_prefix] = apps
         self._reverse_dict[path_prefix] = lookups
         self._populated = True
     finally:
         self._local.populating = False
 def appendlist(self, key, value):
     """Append ``value`` to the list of values stored under ``key``.

     Raises if this dict is immutable (the same guard all mutators use).
     """
     self._assert_mutable()
     MultiValueDict.appendlist(self, key, value)
예제 #51
0
class ModelTree(object):
    r"""A class to handle building and parsing a tree structure given a model.

        `root_model` - The root or "reference" model for the tree. Everything
        is relative to the root model.

        `exclude` - A list of models that are to be excluded from this tree.
        This is typically used to exclude models not intended to be exposed
        through this API.

        `routes` - Explicitly defines a join path between two models. Each
        route is made up of four components. Assuming some model hierarchy
        exists as shown below..

                                ModelA
                                /    \
                            ModelB  ModelC
                               |      |
                               \    ModelD
                                \    /
                                ModelE

        ..the traversal path from ModelA to ModelE is ambiguous. It could
        go from A -> B -> E or A -> C -> D -> E. By default, the shortest
        path is always chosen to reduce the number of joins necessary, but
        if ModelD did not exist..

                                ModelA
                                 /  \
                            ModelB  ModelC
                                 \  /
                                ModelE

        ..both paths only require two joins, thus the path that gets traversed
        first will be the chosen one.

        To explicitly choose a path, a route can be defined. Taking the form::

            {
                'source': 'app1.model1',
                'target': 'app1.model2',
                'field': None,
                'symmetrical': None,
            }

        The `source` model defines the model where the join is being created
        from (the left side of the join). The `target` model defines the
        target model (the right side of the join). `field` is optional,
        but explicitly defines the model field that will be used for the join.
        This is useful if there is more than one foreign key relationship
        between the target and source models. Finally, `symmetrical` is an
        optional boolean that ensures when the target and source models
        switch sides, the same join occurs on the same field.

        Routes are typically used for defining explicit join paths, but
        sometimes it is necessary to exclude join paths. For example if there
        are three possible paths and one should never occur.

        A modeltree config can have `required_routes` and `excluded_routes`
        entries, which are lists of routes in the above format.

        A required route is defined as follows: a join to the specified target
        model is only allowed from the specified source model.  A model can
        only be specified as a target once in the list of required routes.
        Note that the use of the `symmetrical` property of a route
        implicitly adds another route with target and source models swapped,
        so a model can be a target either directly or indirectly.  A single
        source model can participate in multiple required routes.

        An excluded route is more obvious: joining from the specified source
        model to the specified target model is not allowed.

    """

    def __init__(self, model=None, **kwargs):
        if model is None and 'root_model' in kwargs:
            warnings.warn('The "root_model" key has been renamed to "model"',
                          DeprecationWarning)
            model = kwargs.get('root_model')

        if not model:
            raise TypeError('No "model" defined')

        excluded_models = kwargs.get('excluded_models', ())
        required_routes = kwargs.get('required_routes')

        # Legacy option names are honored with a deprecation warning.
        if not excluded_models and 'exclude' in kwargs:
            warnings.warn(
                'The "exclude" key has been renamed to '
                '"excluded_models"', DeprecationWarning)

            excluded_models = kwargs.get('exclude', ())

        if not required_routes and 'routes' in kwargs:
            warnings.warn(
                'The "routes" key has been renamed to '
                '"required_routes"', DeprecationWarning)

            required_routes = kwargs.get('routes')

        excluded_routes = kwargs.get('excluded_routes')

        self.root_model = self.get_model(model, local=False)
        self.alias = kwargs.get('alias', None)

        # Models completely excluded from the tree
        self.excluded_models = [
            self.get_model(label, local=False) for label in excluded_models
        ]

        # Build the routes that are allowed/preferred. Required routes may
        # not target the same model twice, so redundant targets are rejected.
        self._required_joins = self._build_routes(
            required_routes, allow_redundant_targets=False)

        # Build the routes that are excluded
        self._excluded_joins = self._build_routes(excluded_routes)

        # cache each node relative their models
        self._nodes = {}

        # cache all app names relative to their model names i.e. supporting
        # multiple apps with models of the same name
        self._model_apps = MultiValueDict({})

        # cache (app, model) pairs with the respective model class
        self._models = {}

        self._build()

    def __repr__(self):
        return '<ModelTree for {0}>'.format(self.root_model.__name__)

    def _get_local_model(self, model_name, app_name=None):
        "Attempts to get a model from local cache."
        if not app_name:
            app_names = self._model_apps.getlist(model_name)
            # No apps found with this model
            if not app_names:
                return

            # Multiple apps found for this model
            if len(app_names) > 1:
                raise ModelNotUnique(
                    'The model "{0}" is not unique. '
                    'Specify the app name as well.'.format(model_name))

            app_name = app_names[0]

        try:
            return self._models[(app_name, model_name)]
        except KeyError:
            # Not part of this tree; caller decides which error to raise.
            pass

    def _get_model(self, model_name, app_name=None):
        "Attempts to get a model from application cache."
        model = None

        # If an app name is supplied we can reduce it down to only models
        # within that particular app.
        if app_name:
            model = apps.get_model(app_name, model_name)
        else:
            # Attempt to find the model based on the name. Since we don't
            # have the app name, if a model of the same name exists multiple
            # times, we need to throw an error.
            for app, app_models in list(apps.app_models.items()):
                if model_name in app_models:
                    if model is not None:
                        raise ModelNotUnique(
                            'The model "{0}" is not unique. '
                            'Specify the app name as well.'.format(model_name))

                    model = app_models[model_name]

        return model

    def get_model(self, model_name=None, app_name=None, local=True):
        """A few variations are handled here for increased flexibility:

            - if a model class is given, simply echo the model back

            - if a app-model label e.g. 'library.book', is passed, the
            standard app_models cache is used

            - if `app_name` and `model_name` is provided, the standard
            app_models cache is used

            - if only `model_name` is supplied, attempt to find the model
            across all apps. if the model is found more than once, an error
            is thrown

            - if `local` is true, only models related to this `ModelTree`
            instance are searched through

        Raises `ModelNotRelated` (local lookup) or `ModelDoesNotExist`
        (global lookup) when no model is found.
        """
        model = None

        if not (app_name or model_name):
            return self.root_model

        # model class
        if inspect.isclass(model_name) and \
                issubclass(model_name, models.Model):
            # set it initially for either local and non-local
            model = model_name

            # additional check to ensure the model exists locally, reset to
            # None if it does not
            if local and model not in self._nodes:
                model = None

        # handle string-based arguments
        else:
            # handle the syntax 'library.book'
            if model_name:
                if '.' in model_name:
                    app_name, model_name = model_name.split('.', 1)
                model_name = model_name.lower()

            if local:
                model = self._get_local_model(model_name, app_name)
            else:
                model = self._get_model(model_name, app_name)

        # both mechanisms above may result in no model being found
        if model is None:
            if local:
                raise ModelNotRelated(
                    'No model found named "{0}"'.format(model_name))
            else:
                raise ModelDoesNotExist(
                    'No model found named "{0}"'.format(model_name))

        return model

    def get_field(self, name, model=None):
        "Returns the field `name` on `model` (the root model by default)."
        if model is None:
            model = self.root_model
        return model._meta.get_field(name)

    def _build_routes(self, routes, allow_redundant_targets=True):
        """Routes provide a means of specifying JOINs between two tables.

        routes - a collection of dicts defining source->target mappings
                 with optional `field` specifier and `symmetrical` attribute.

        allow_redundant_targets - whether two routes in this collection
                 are allowed to have the same target - this should NOT
                 be allowed for required routes.

        Returns a dict mapping (source model, target model) pairs to the
        join field (or None when the route is a plain source->target pair).
        """
        routes = routes or ()
        joins = {}
        targets_seen = set()

        for route in routes:
            if isinstance(route, dict):
                source_label = route.get('source')
                target_label = route.get('target')
                field_label = route.get('field')
                symmetrical = route.get('symmetrical')
            else:
                # Legacy 4-tuple route format.
                warnings.warn('Routes are now defined as dicts',
                              DeprecationWarning)
                source_label, target_label, field_label, symmetrical = route

            # get models
            source = self.get_model(source_label, local=False)
            target = self.get_model(target_label, local=False)

            field = None

            # get field
            if field_label:
                model_name, field_name = field_label.split('.', 1)
                model_name = model_name.lower()

                # determine which model the join field specified exists on
                if model_name == source.__name__.lower():
                    field = self.get_field(field_name, source)
                elif model_name == target.__name__.lower():
                    field = self.get_field(field_name, target)
                else:
                    raise TypeError('model for join field, "{0}", '
                                    'does not exist'.format(field_name))

                # Normalize reverse relation descriptors to the concrete
                # field so both directions compare equal.
                if isinstance(field, (ManyToOneRel, ManyToManyRel)):
                    field = field.field

            if not allow_redundant_targets:
                if target in targets_seen:
                    raise ValueError('Model {0} cannot be the target of '
                                     'more than one route in this list'.format(
                                         target_label))
                else:
                    targets_seen.add(target)

            # The `joins` hash defines pairs which are explicitly joined
            # via the specified field.  If no field is defined, then the
            # join field is implied or does not matter; the route is reduced
            # to a straight lookup.
            joins[(source, target)] = field

            if symmetrical:
                # A symmetrical route adds the mirrored pair, which also
                # counts toward the one-target-per-route restriction.
                if not allow_redundant_targets:
                    if source in targets_seen:
                        raise ValueError(
                            'Model {0} cannot be the target of '
                            'more than one route in this list'.format(
                                source_label))
                    else:
                        targets_seen.add(source)

                joins[(target, source)] = field

        return joins

    def _join_allowed(self, source, target, field=None):
        """Checks if the join between `source` and `target` via `field`
        is allowed.
        """
        join = (source, target)

        # No circles
        if target == source:
            return False

        # Prevent join to excluded models
        if target in self.excluded_models:
            return False

        # Never go back through the root
        if target == self.root_model:
            return False

        # Apply excluded joins if any. A route without a field excludes the
        # pair entirely; with a field, only that particular join is blocked.
        if join in self._excluded_joins:
            _field = self._excluded_joins[join]
            if not _field:
                return False
            elif _field and _field == field:
                return False

        # Check if the join is allowed by a required rule
        for (_source, _target), _field in list(self._required_joins.items()):
            if _target == target:
                if _source != source:
                    return False

                # If a field is supplied, check to see if the field is allowed
                # for this join.
                if field and _field and _field != field:
                    return False

        return True

    def _add_node(self, parent, model, relation, reverse, related_name,
                  accessor_name, nullable, depth):
        """Adds a node to the tree only if a node of the same `model' does not
        already exist in the tree with smaller depth. If the node is added, the
        tree traversal continues finding the node's relations.

        Conditions in which the node will fail to be added:

            - a reverse relationship is blocked via the '+'
            - the model is excluded completely
            - the model is going back the same path it came from
            - the model is circling back to the root_model
            - the model does not come from an explicitly declared parent model


        This is running in a recursive way with _find_relations
        They keep calling each other as they go through all the models
        """
        # Reverse relationships disabled with a trailing '+' related_name
        # are never traversed.
        if reverse and '+' in related_name:
            return

        node_hash = self._nodes.get(model, None)

        # don't add node if a path with a shorter depth exists. this is applied
        # after the correct join has been determined. generally if a route is
        # defined for relation, this will never be an issue since there would
        # only be one path available. if a route is not defined, the shorter
        # path will be found
        if not node_hash or node_hash['depth'] > depth:
            if node_hash:
                # A shorter path was found; detach the node from its old
                # parent before re-adding it here.
                node_hash['parent'].remove_child(model)

            node = ModelTreeNode(model, parent, relation, reverse,
                                 related_name, accessor_name, nullable, depth)

            self._nodes[model] = {
                'parent': parent,
                'depth': depth,
                'node': node,
            }

            node = self._find_relations(node, depth)
            parent.children.append(node)

    def _find_relations(self, node, depth=0):
        """Finds all relations given a node.

        This runs in a recursive way with _add_node. They keep calling each
        other based on depth.
        """
        depth += 1

        model = node.model

        # NOTE: the many-to-many relations are evaluated first to prevent
        # 'through' models being bound as a ForeignKey relationship.
        fields = sorted(model._meta.get_fields(),
                        reverse=True,
                        key=lambda f: bool(f.many_to_many))

        # determine relational fields to determine paths
        # f.rel changed to f.remote_field for django2
        # f.rel.to changed to f.remote_field.model for django2
        forward_fields = [
            f for f in fields
            if (f.one_to_one or f.many_to_many or f.many_to_one) and (
                f.concrete or not f.auto_created) and f.remote_field is
            not None  # Generic foreign keys do not define rel.
            and self._join_allowed(f.model, f.remote_field.model, f)
        ]
        reverse_fields = [
            f for f in fields
            if (f.one_to_many or f.one_to_one or f.many_to_many) and (
                not f.concrete and f.auto_created)
            and self._join_allowed(f.model, f.related_model, f.field)
        ]

        def get_relation_type(f):
            # NOTE(review): 'onetone' looks like a typo for 'onetoone', but
            # it is preserved byte-for-byte since consumers of the `relation`
            # attribute may match against it — confirm before renaming.
            if f.one_to_one:
                return 'onetone'
            elif f.many_to_many:
                return 'manytomany'
            elif f.one_to_many or f.many_to_one:
                return 'foreignkey'

        # Iterate over forward relations
        # changed f.rel.to to f.remote_field.model for django2
        for f in forward_fields:
            null = f.many_to_many or f.null
            kwargs = {
                'parent': node,
                'model': f.remote_field.model,
                'relation': get_relation_type(f),
                'reverse': False,
                'related_name': f.name,
                'accessor_name': f.name,
                'nullable': null,
                'depth': depth,
            }
            self._add_node(**kwargs)

        # Iterate over reverse relations.
        for r in reverse_fields:
            kwargs = {
                'parent': node,
                'model': r.related_model,
                'relation': get_relation_type(r),
                'reverse': True,
                'related_name': r.field.related_query_name(),
                'accessor_name': r.get_accessor_name(),
                'nullable': True,
                'depth': depth,
            }
            self._add_node(**kwargs)

        return node

    def _build(self):
        "Builds the tree and the local model/app lookup caches."
        node = ModelTreeNode(self.root_model)
        self._root_node = self._find_relations(node)

        self._nodes[self.root_model] = {
            'parent': None,
            'depth': 0,
            'node': self._root_node,
        }

        # store local cache of all models in this tree by name
        for model in self._nodes:
            model_name = model._meta.object_name.lower()
            app_name = model._meta.app_label

            self._model_apps.appendlist(model_name, app_name)
            self._models[(app_name, model_name)] = model

    @property
    def root_node(self):
        "Returns the `root_node` and implicitly builds the tree."
        if not hasattr(self, '_root_node'):
            self._build()
        return self._root_node

    def _node_path_to_model(self, model, node, path=None):
        """Returns a list representing the path of nodes to the model.

        Returns None if `model` is not reachable from `node`.
        """
        # A mutable default ([]) is avoided here; even though the list was
        # never mutated in place, None is the safe idiom.
        if path is None:
            path = []

        if node.model == model:
            return path

        for child in node.children:
            mpath = self._node_path_to_model(model, child, path + [child])
            # The recursive call returns None when `model` is not in this
            # child's subtree, so only a found (non-None) path is returned.
            if mpath:
                return mpath

    def _node_path(self, model):
        "Returns a list of nodes that defines the path of traversal."
        model = self.get_model(model)
        return self._node_path_to_model(model, self.root_node)

    def get_joins(self, model):
        """Returns a list of JOIN connections that can be manually applied to a
        QuerySet object. See `.add_joins()`

        This allows for the ORM to handle setting up the JOINs which may be
        different depending on the QuerySet being altered.
        """
        node_path = self._node_path(model)

        joins = []
        for i, node in enumerate(node_path):
            # ignore each subsequent first join in the set of joins for a
            # given model
            table, path_joins = node.get_joins()
            if i == 0:
                joins.append(table)
            joins.extend(path_joins)

        return joins

    def query_string(self, model):
        "Returns the '__'-separated ORM lookup path from the root to `model`."
        nodes = self._node_path(model)
        return str('__'.join(n.related_name for n in nodes))

    def query_string_for_field(self, field, operator=None, model=None):
        """Takes a `models.Field` instance and returns a query string relative
        to the root model.
        """
        if model:
            if model._meta.proxy and \
                    model._meta.proxy_for_model is not field.model:
                raise ModelTreeError('proxied model must be the field model')

        else:
            model = field.model

        # When an explicit reverse field is used, simply use it directly
        if isinstance(field, (ManyToManyRel, ManyToOneRel)):
            toks = [field.field.related_query_name()]
        else:
            path = self.query_string(model)

            if path:
                toks = [path, field.name]
            else:
                toks = [field.name]

        if operator is not None:
            toks.append(operator)

        return str('__'.join(toks))

    def query_condition(self, field, operator, value, model=None):
        "Convenience method for constructing a `Q` object for a given field."
        lookup = self.query_string_for_field(field,
                                             operator=operator,
                                             model=model)
        return Q(**{lookup: value})

    def add_joins(self, model, queryset=None):
        """Sets up all necessary joins up to the given model on the queryset.
        Returns the alias to the model's database table.
        """
        if queryset is None:
            clone = self.get_queryset()
        else:
            clone = queryset._clone()

        alias = None

        for i, join in enumerate(self.get_joins(model)):
            if isinstance(join, BaseTable):
                # Skip base tables already present in the query's alias map.
                alias_map = clone.query.alias_map
                if join.table_alias in alias_map or \
                        join.table_name in alias_map:
                    continue
            alias = clone.query.join(join)

        # this implies the join is redundant and occurring on the root model's
        # table
        if alias is None:
            alias = clone.query.get_initial_alias()

        return clone, alias

    def add_select(self, *fields, **kwargs):
        "Replaces the `SELECT` columns with the ones provided."
        if 'queryset' in kwargs:
            queryset = kwargs.pop('queryset')
        else:
            queryset = self.get_queryset()

        queryset.query.default_cols = False
        include_pk = kwargs.pop('include_pk', True)

        if include_pk:
            fields = [self.root_model._meta.pk] + list(fields)

        aliases = []

        for pair in fields:
            # Each entry is either a (model, field) pair or a bare field
            # whose model is inferred from the field itself.
            if isinstance(pair, (list, tuple)):
                model, field = pair
            else:
                field = pair
                model = field.model

            queryset, alias = self.add_joins(model, queryset)

            aliases.append(Col(alias, field, field))

        if aliases:
            queryset.query.select = aliases

        return queryset

    def get_queryset(self):
        "Returns a QuerySet relative to the `root_model`."
        return self.root_model._default_manager.get_queryset()
예제 #52
0
def send_review_mail(user,
                     review_request,
                     subject,
                     in_reply_to,
                     to_field,
                     cc_field,
                     text_template_name,
                     html_template_name,
                     context=None,
                     extra_headers=None):
    """Format and send an e-mail out.

    Args:
        user (django.contrib.auth.models.User):
            The user who is sending the e-mail.

        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request that the e-mail is about.

        subject (unicode):
            The subject of the e-mail address.

        in_reply_to (unicode):
            The e-mail message ID for threading.

        to_field (list):
            The recipients to send the e-mail to. This should be a list of
            :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        cc_field (list):
            The addresses to be CC'ed on the e-mail. This should be a list of
            :py:class:`Users <django.contrib.auth.models.User>` and
            :py:class:`Groups <reviewboard.reviews.models.Group>`.

        text_template_name (unicode):
            The name for the text e-mail template.

        html_template_name (unicode):
            The name for the HTML e-mail template.

        context (dict):
            Optional extra context to provide to the template.

        extra_headers (dict):
            Either a dict or
            :py:class:`~django.utils.datastructures.MultiValueDict` providing
            additional headers to send with the e-mail.

    Returns:
        unicode:
        The resulting e-mail message ID, or ``None`` if the recipient lists
        end up empty and no message is sent.
    """
    current_site = Site.objects.get_current()
    local_site = review_request.local_site
    from_email = build_email_address_for_user(user)

    # Recipients are resolved to address sets; CC entries already present
    # in To are dropped via set difference.
    to_field = recipients_to_addresses(to_field, review_request.id)
    cc_field = recipients_to_addresses(cc_field, review_request.id) - to_field

    if not user.should_send_own_updates():
        # Reuse the sender address computed above rather than rebuilding it
        # with a second identical build_email_address_for_user() call.
        to_field.discard(from_email)
        cc_field.discard(from_email)

    if not to_field and not cc_field:
        # Nothing to send.
        return

    siteconfig = current_site.config.get()
    domain_method = siteconfig.get("site_domain_method")

    if not context:
        context = {}

    context['user'] = user
    context['domain'] = current_site.domain
    context['domain_method'] = domain_method
    context['review_request'] = review_request

    if review_request.local_site:
        context['local_site_name'] = review_request.local_site.name

    text_body = render_to_string(text_template_name, context)
    html_body = render_to_string(html_template_name, context)

    base_url = get_server_url(local_site=local_site)

    headers = MultiValueDict({
        'X-ReviewBoard-URL': [base_url],
        'X-ReviewRequest-URL':
        [urljoin(base_url, review_request.get_absolute_url())],
        'X-ReviewGroup': [
            ', '.join(group.name
                      for group in review_request.target_groups.all())
        ],
    })

    if extra_headers:
        # Normalize plain dicts so MultiValueDict.update() appends list
        # values rather than single items.
        if not isinstance(extra_headers, MultiValueDict):
            extra_headers = MultiValueDict(
                (key, [value])
                for (key, value) in six.iteritems(extra_headers))

        headers.update(extra_headers)

    if review_request.repository:
        headers['X-ReviewRequest-Repository'] = review_request.repository.name

    latest_diffset = review_request.get_latest_diffset()

    if latest_diffset:
        # Advertise every file touched by the latest diff, one
        # X-ReviewBoard-Diff-For header per filename.
        modified_files = set()

        for filediff in latest_diffset.files.all():
            if filediff.deleted or filediff.copied or filediff.moved:
                modified_files.add(filediff.source_file)

            if filediff.is_new or filediff.copied or filediff.moved:
                modified_files.add(filediff.dest_file)

        for filename in modified_files:
            headers.appendlist('X-ReviewBoard-Diff-For', filename)

    subject = subject.strip()
    to_field = list(to_field)
    cc_field = list(cc_field)

    if settings.DEFAULT_FROM_EMAIL:
        sender = build_email_address(full_name=user.get_full_name(),
                                     email=settings.DEFAULT_FROM_EMAIL)
    else:
        sender = None

    message = EmailMessage(subject=subject,
                           text_body=text_body.encode('utf-8'),
                           html_body=html_body.encode('utf-8'),
                           from_email=from_email,
                           sender=sender,
                           to=to_field,
                           cc=cc_field,
                           in_reply_to=in_reply_to,
                           headers=headers)

    try:
        message.send()
    except Exception:
        # Sending is best-effort: log the failure but still return the
        # message ID so callers can thread follow-up mail.
        logging.exception(
            "Error sending e-mail notification with subject "
            "'%s' on behalf of '%s' to '%s'", subject, from_email,
            ','.join(to_field + cc_field))

    return message.message_id
예제 #53
0
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """

    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.

        Raises ``MultiPartParserError`` for a non-multipart Content-Type, a
        missing or invalid boundary, or a negative Content-Length.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        # The boundary is compared against raw bytes from the stream, so
        # normalize it to bytes here.
        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        # Per-handler byte counters for the file currently being received.
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # A part without a usable Content-Disposition name is skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            # Fall back to the raw bytes when decoding fails.
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                # A handler claimed exclusive control of this file.
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        """Close any file objects still held open by the upload handlers."""
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
예제 #54
0
파일: box.py 프로젝트: whit/ella
 def resolve_params(self, context):
     """
     Render the nodelist and collect the resulting key/value pairs into a
     MultiValueDict (a key may legitimately appear more than once).
     """
     rendered = self.nodelist.render(context)
     resolved = MultiValueDict()
     for name, val in self.parse_params(rendered):
         resolved.appendlist(name, val)
     return resolved
class MultiPartParser:
    """
    A rfc2388 multipart/form-data parser.

    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the
            uploaded data.
        :encoding:
            The encoding with which to treat the incoming data.

        Raises ``MultiPartParserError`` for a non-multipart or non-ASCII
        Content-Type, a missing/invalid boundary, or a negative Content-Length.
        """
        # Content-Type should contain multipart and the boundary information.
        content_type = META.get('CONTENT_TYPE', '')
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' %
                                       content_type)

        # Parse the header to get the boundary to split the parts.
        try:
            ctypes, opts = parse_header(content_type.encode('ascii'))
        except UnicodeEncodeError:
            raise MultiPartParserError(
                'Invalid non-ASCII Content-Type in multipart: %s' %
                force_str(content_type))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' %
                                       force_str(boundary))

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('CONTENT_LENGTH', 0))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" %
                                       content_length)

        # The boundary is compared against raw bytes from the stream, so
        # normalize it to bytes here.
        if isinstance(boundary, str):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [
            x.chunk_size for x in upload_handlers if x.chunk_size
        ]
        self._chunk_size = min([2**31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Return a tuple containing the POST and FILES dictionary, respectively.

        Enforces ``DATA_UPLOAD_MAX_NUMBER_FIELDS`` (raising
        ``TooManyFieldsSent``) and ``DATA_UPLOAD_MAX_MEMORY_SIZE`` (raising
        ``RequestDataTooBig``) on non-file POST fields.
        """
        # Imported here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict(encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(
                self._input_data,
                self._meta,
                self._content_length,
                self._boundary,
                encoding,
            )
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict(mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        # Per-handler byte counters for the file currently being received.
        counters = [0] * len(handlers)

        # Number of bytes that have been read.
        num_bytes_read = 0
        # To count the number of keys in the request.
        num_post_keys = 0
        # To limit the amount of data read from the request.
        read_size = None

        try:
            for item_type, meta_data, field_stream in Parser(
                    stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # A part without a usable Content-Disposition name is skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_str(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
                    num_post_keys += 1
                    if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
                            and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS <
                            num_post_keys):
                        raise TooManyFieldsSent(
                            'The number of GET/POST parameters exceeded '
                            'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.')

                    # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
                    if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
                        read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read

                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read(size=read_size)
                        num_bytes_read += len(raw_data)
                        try:
                            data = base64.b64decode(raw_data)
                        except binascii.Error:
                            # Fall back to the raw bytes when decoding fails.
                            data = raw_data
                    else:
                        data = field_stream.read(size=read_size)
                        num_bytes_read += len(data)

                    # Add two here to make the check consistent with the
                    # x-www-form-urlencoded check that includes '&='.
                    num_bytes_read += len(field_name) + 2
                    if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
                            and num_bytes_read >
                            settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
                        raise RequestDataTooBig(
                            'Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.'
                        )

                    self._post.appendlist(
                        field_name, force_str(data, encoding,
                                              errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_str(file_name,
                                              encoding,
                                              errors='replace')
                        file_name = self.IE_sanitize(html.unescape(file_name))
                    if not file_name:
                        continue

                    content_type, content_type_extra = meta_data.get(
                        'content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(
                                    field_name,
                                    file_name,
                                    content_type,
                                    content_length,
                                    charset,
                                    content_type_extra,
                                )
                            except StopFutureHandlers:
                                # A handler claimed exclusive control of this file.
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 -
                                                                   remaining)
                                    stripped_chunk += b"".join(
                                        over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as exc:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    raise MultiPartParserError(
                                        "Could not decode base64 data."
                                    ) from exc

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(
                                    chunk, counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # Don't continue if the chunk received by
                                    # the handler is None.
                                    break

                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        # any() shortcircuits if a handler's upload_complete() returns a value.
        any(handler.upload_complete() for handler in handlers)
        self._post._mutable = False
        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_str(old_field_name, self._encoding,
                              errors='replace'), file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        """Close any file objects still held open by the upload handlers."""
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
예제 #56
0
class IntelligentUploadHandler(FileUploadHandler):
    """
    An upload handler which overrides the default multipart parser to allow
    simultaneous parsing of fields and files... intelligently. Subclass this
    for real and true awesomeness.
    """
    def __init__(self, *args, **kwargs):
        super(IntelligentUploadHandler, self).__init__(*args, **kwargs)

    def field_parsed(self, field_name, field_value):
        """
        A callback method triggered when a non-file field has been parsed
        successfully by the parser. Use this to listen for new fields being
        parsed.
        """
        pass

    def handle_raw_input(self,
                         input_data,
                         META,
                         content_length,
                         boundary,
                         encoding=None):
        """
        Parse the raw input from the HTTP request and split items into fields
        and files, executing callback methods as necessary.

        Returns a ``(POST, FILES)`` tuple: a mutable QueryDict of form fields
        and a MultiValueDict of completed file objects.

        Shamelessly adapted and borrowed from
        django.http.multiparser.MultiPartParser.
        """
        # following suit from the source class, this is imported here to avoid
        # a potential circular import
        from django.http import QueryDict

        # create return values
        self.POST = QueryDict('', mutable=True)
        self.FILES = MultiValueDict()

        # initialize the parser and stream
        stream = LazyStream(ChunkIter(input_data, self.chunk_size))
        # whether or not to signal a file-completion at the beginning
        # of the loop.
        old_field_name = None
        counter = 0

        try:
            for item_type, meta_data, field_stream in Parser(stream, boundary):
                if old_field_name:
                    # we run this test at the beginning of the next loop since
                    # we cannot be sure a file is complete until we hit the
                    # next boundary/part of the multipart content.
                    file_obj = self.file_complete(counter)

                    if file_obj:
                        # if we return a file object, add it to the files dict
                        self.FILES.appendlist(
                            force_text(old_field_name,
                                       encoding,
                                       errors='replace'), file_obj)

                    # wipe it out to prevent havoc
                    old_field_name = None
                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # a part without a usable Content-Disposition name is skipped
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')

                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()

                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # this is a POST field
                    if transfer_encoding == "base64":
                        raw_data = field_stream.read()
                        try:
                            # BUGFIX: str(raw_data).decode('base64') was
                            # Python-2-only and raised AttributeError on
                            # Python 3; use base64.b64decode instead.
                            # binascii.Error subclasses ValueError, so no
                            # extra import is needed.
                            data = base64.b64decode(raw_data)
                        except ValueError:
                            # fall back to the raw bytes when decoding fails
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self.POST.appendlist(
                        field_name, force_text(data,
                                               encoding,
                                               errors='replace'))

                    # trigger listener
                    self.field_parsed(field_name, self.POST.get(field_name))
                elif item_type == FILE:
                    # this is a file
                    file_name = disposition.get('filename')

                    if not file_name:
                        continue

                    # transform the file name
                    file_name = force_text(file_name,
                                           encoding,
                                           errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type = meta_data.get('content-type',
                                                 ('', ))[0].strip()

                    try:
                        charset = meta_data.get('content-type', (0, {}))[1]\
                            .get('charset', None)
                    except (IndexError, TypeError, AttributeError, KeyError):
                        # malformed content-type metadata: no charset available
                        charset = None

                    try:
                        file_content_length = int(
                            meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        file_content_length = None

                    counter = 0

                    # now, do the important file stuff
                    try:
                        # alert on the new file
                        kwargs = {
                            'content_type': content_type,
                            'content_length': file_content_length,
                            'charset': charset
                        }
                        self.new_file(field_name, file_name, **kwargs)

                        # chubber-chunk it
                        for chunk in field_stream:
                            # we need AES compatibles blocks (multiples of 16 bits)
                            over_bytes = len(chunk) % 16
                            if over_bytes:
                                over_chunk =\
                                    field_stream.read(16 - over_bytes)
                                chunk += over_chunk

                            if transfer_encoding == "base64":
                                try:
                                    chunk = base64.b64decode(chunk)
                                except Exception as e:
                                    # since this is only a chunk, any
                                    # error is an unfixable error
                                    raise MultiPartParserError(
                                        "Could not decode base64 data: %r" % e)

                            chunk_length = len(chunk)
                            self.receive_data_chunk(chunk, counter)
                            counter += chunk_length

                            if counter > settings.UPLOAD_FILE_SIZE_LIMIT:
                                raise SkipFile('File is too big.')
                            # ... and we're done
                    except SkipFile:
                        # just eat the rest
                        exhaust(field_stream)
                    else:
                        # handle file upload completions on next iteration
                        old_field_name = field_name

        except StopUpload as e:
            # if we get a request to stop the upload,
            # exhaust it if no con reset
            if not e.connection_reset:
                exhaust(input_data)
        else:
            # make sure that the request data is all fed
            exhaust(input_data)

        # signal the upload has been completed
        self.upload_complete()

        return self.POST, self.FILES

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()
예제 #57
0
class EmailBaseModel(models.Model):
    """Abstract base model persisting an inbound email message.

    Header and Content-ID data arrive as JSON text in ``message_headers``
    and ``content_id_map`` and are lazily parsed on first access through
    the ``headers`` / ``content_ids`` properties.
    """
    sender = models.EmailField(_("sender"), max_length=255)
    recipient = models.CharField(_("recipient"), max_length=255)
    subject = models.CharField(_("subject"), max_length=255, blank=True)
    from_str = models.CharField(_("from"), max_length=255)
    body_plain = models.TextField(_("body plain"), blank=True)
    body_html = models.TextField(_("body html"), blank=True)
    stripped_text = models.TextField(_("stripped text"), blank=True)
    stripped_html = models.TextField(_("stripped html"), blank=True)
    stripped_signature = models.TextField(_("stripped signature"), blank=True)
    message_headers = models.TextField(_("message headers"),
                                       blank=True,
                                       help_text=_("Stored in JSON."))
    content_id_map = models.TextField(
        _("Content-ID map"),
        blank=True,
        help_text=
        _("Dictionary mapping Content-ID (CID) values to corresponding attachments. Stored in JSON."
          ))
    received = models.DateTimeField(_("received"), auto_now_add=True)

    class Meta:
        abstract = True
        verbose_name = _("incoming email")
        verbose_name_plural = _("incoming emails")

    def __init__(self, *args, **kwargs):
        super(EmailBaseModel, self).__init__(*args, **kwargs)
        # Lazy parse caches; None means "not parsed yet".
        self._headers = None
        self._cids = None

    def _load_headers(self):
        """Parse ``message_headers`` JSON into ``self._headers``.

        On parse failure the error is logged and ``_headers`` is left as
        an (empty) MultiValueDict instead of propagating the exception.
        """
        self._headers = MultiValueDict()
        try:
            header_list = json.loads(self.message_headers)
            for key, val in header_list:
                self._headers.appendlist(key, val)
        # Was a bare ``except:`` — narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; parse errors are still logged, not raised.
        except Exception:
            logger.exception(
                "Error parsing JSON data containing message headers")

    @property
    def headers(self):
        """Access message_headers parsed into MultiValueDict"""
        if self._headers is None:
            self._load_headers()
        return self._headers

    def _load_cids(self):
        """Parse ``content_id_map`` JSON into ``self._cids``.

        Errors are logged; ``_cids`` falls back to the empty dict set
        before the parse attempt.
        """
        if self.content_id_map:
            self._cids = {}
        try:
            self._cids = json.loads(self.content_id_map)
        # Was a bare ``except:`` — see note in _load_headers.
        except Exception:
            logger.exception("Error parsing JSON data containing Content-IDs")

    @property
    def content_ids(self):
        """Access content_id_map as dict"""
        if not self.content_id_map:
            return
        if self._cids is None:
            self._load_cids()
        return self._cids

    def __unicode__(self):
        return _("Message from {from_str}: {subject_trunc}").format(
            from_str=self.from_str, subject_trunc=self.subject[:20])
예제 #58
0
파일: models.py 프로젝트: obfusgator/nadine
class EmailMessage(models.Model):
    """An inbound email, optionally associated with a user and mailing list.

    Raw header and Content-ID data are stored as JSON text and lazily
    parsed on access via ``headers`` / ``content_ids``. Convenience
    properties expose common header fields (Message-Id, Cc, References,
    In-Reply-To) and parsed sender information.
    """
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             blank=True,
                             null=True,
                             verbose_name=_("user"),
                             on_delete=models.SET_NULL)
    sender = models.EmailField(_("sender"), max_length=255)
    from_str = models.CharField(_("from"), max_length=255)
    recipient = models.CharField(_("recipient"), max_length=255)
    subject = models.CharField(_("subject"), max_length=255, blank=True)
    body_plain = models.TextField(_("body plain"), blank=True)
    body_html = models.TextField(_("body html"), blank=True)
    stripped_text = models.TextField(_("stripped text"), blank=True)
    stripped_html = models.TextField(_("stripped html"), blank=True)
    stripped_signature = models.TextField(_("stripped signature"), blank=True)
    message_headers = models.TextField(_("message headers"),
                                       blank=True,
                                       help_text=_("Stored in JSON."))
    content_id_map = models.TextField(
        _("Content-ID map"),
        blank=True,
        help_text=
        _("Dictionary mapping Content-ID (CID) values to corresponding attachments. Stored in JSON."
          ))
    received = models.DateTimeField(_("received"), auto_now_add=True)
    mailing_list = models.ForeignKey(MailingList,
                                     blank=True,
                                     null=True,
                                     on_delete=models.CASCADE)

    class Meta:
        verbose_name = _("email message")
        verbose_name_plural = _("email messages")

    def __init__(self, *args, **kwargs):
        super(EmailMessage, self).__init__(*args, **kwargs)
        # Lazy parse caches; None means "not parsed yet".
        self._headers = None
        self._cids = None

    def _load_headers(self):
        """Parse ``message_headers`` JSON into ``self._headers``.

        On parse failure the error is logged and ``_headers`` is left as
        an (empty) MultiValueDict instead of propagating the exception.
        """
        self._headers = MultiValueDict()
        try:
            header_list = json.loads(self.message_headers)
            for key, val in header_list:
                self._headers.appendlist(key, val)
        # Was a bare ``except:`` — narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; parse errors are still logged, not raised.
        except Exception:
            logger.exception(
                "Error parsing JSON data containing message headers")

    def _load_cids(self):
        """Parse ``content_id_map`` JSON into ``self._cids``.

        Errors are logged; ``_cids`` falls back to the empty dict set
        before the parse attempt.
        """
        if self.content_id_map:
            self._cids = {}
        try:
            self._cids = json.loads(self.content_id_map)
        # Was a bare ``except:`` — see note in _load_headers.
        except Exception:
            logger.exception("Error parsing JSON data containing Content-IDs")

    @property
    def headers(self):
        """Access message_headers parsed into MultiValueDict"""
        if self._headers is None:
            self._load_headers()
        return self._headers

    @property
    def content_ids(self):
        """Access content_id_map as dict"""
        if not self.content_id_map:
            return
        if self._cids is None:
            self._load_cids()
        return self._cids

    @property
    def message_id(self):
        """The Message-Id header value, or None if absent."""
        return self.headers.get('Message-Id', None)

    @property
    def cc(self):
        """The Cc header value, or None if absent."""
        return self.headers.get('Cc', None)

    @property
    def references(self):
        """The References header value, or None if absent."""
        return self.headers.get('References', None)

    @property
    def in_reply_to(self):
        """The In-Reply-To header value, or None if absent."""
        return self.headers.get('In-Reply-To', None)

    @property
    def from_name(self):
        """Display-name portion of ``from_str`` (may be empty)."""
        from_name, from_address = email.utils.parseaddr(self.from_str)
        return from_name

    @property
    def from_address(self):
        """Address portion of ``from_str``."""
        from_name, from_address = email.utils.parseaddr(self.from_str)
        return from_address

    @property
    def clean_subject(self):
        """Subject with the mailing list's prefix (and anything before it)
        removed, whitespace-trimmed."""
        subject = self.subject
        if self.mailing_list and self.mailing_list.subject_prefix:
            prefix = self.mailing_list.subject_prefix
            index = subject.find(prefix)
            if index >= 0:
                subject = subject[index + len(prefix):]
        return subject.strip()

    @property
    def is_moderated_subject(self):
        """True when the subject looks like an auto-reply/out-of-office."""
        s = self.subject.lower()
        # Tuple membership test replaces the repeated if/return chain.
        return any(marker in s for marker in (
            "auto-reply", "auto reply", "automatic reply", "out of office"))

    @property
    def public_url(self):
        """Absolute public URL for this message's detail view."""
        return settings.SITE_PROTO + "://" + settings.SITE_DOMAIN + reverse(
            'comlink:mail', kwargs={'id': self.id})

    def get_user(self):
        """Return the associated user, resolving by sender address if unset."""
        if not self.user:
            self.user = User.helper.by_email(self.from_address)
        return self.user

    def get_body(self, prefer_html=True):
        """Best-available body text, preferring stripped over raw and
        (optionally) HTML over plain."""
        if prefer_html:
            if self.stripped_html:
                return self.stripped_html
            if self.body_html:
                return self.body_html
        if self.stripped_text:
            return self.stripped_text
        return self.body_plain

    def get_mailgun_data(self, stripped=True):
        """Build a dict of this message in Mailgun send-API shape.

        ``stripped`` selects the signature-stripped body variants.
        """
        if stripped:
            body_plain = self.stripped_text
            body_html = self.stripped_html
        else:
            body_plain = self.body_plain
            body_html = self.body_html

        # Build and return our data
        # NOTE(review): when there is no Cc header, ``self.cc`` is None and
        # this produces "cc": [None] — verify downstream senders tolerate it.
        mailgun_data = {
            "from": self.from_str,
            "to": [
                self.recipient,
            ],
            "cc": [
                self.cc,
            ],
            "subject": self.clean_subject,
            "text": body_plain,
            "html": body_html,
        }
        return mailgun_data

    def __str__(self):
        return _("Message from {from_str}: {subject_trunc}").format(
            from_str=self.from_str, subject_trunc=self.subject[:20])