Example #1
def project_list(search_str=None, max_count=0):
    """List projects matching a given linkname filter.

    Filter projects by linkname. Projects are compared to the search
    string via a case-insensitive containment test, a.k.a. a partial
    match.

    Args:
        search_str: The string to compare project names against. If
            blank, all projects will be returned.
        max_count (int): The maximum number of projects to return.

    Returns:
        A serialized list of projects matching filter, if any. A list
        of all projects if no filter given.
    """
    try:
        if search_str:
            projects = Project.objects.filter(linkname__icontains=search_str)
        else:
            projects = Project.objects.all()

        if max_count > 0:
            return list(map(project_to_dict, projects))[:max_count]
        elif max_count < 0:
            return list(map(project_to_dict, projects))[max_count:]
        else:
            return list(map(project_to_dict, projects))
    except Project.DoesNotExist:
        return []
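
On Python 3, map() returns a lazy iterator that cannot be sliced, so the serialized results above have to be materialised with list() before the max_count slicing is applied (on Python 2, map() returned a plain list and slicing worked directly). A small standalone illustration of the pitfall:

items = ['alpha', 'beta', 'gamma']

mapped = map(str.upper, items)
try:
    mapped[:2]  # TypeError on Python 3: map objects are not subscriptable
except TypeError as exc:
    print('cannot slice a map object:', exc)

# Materialise first, then slice
print(list(map(str.upper, items))[:2])  # ['ALPHA', 'BETA']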
Example #2
def state_list(search_str=None, max_count=0):
    """List states matching a given name filter.

    Filter states by name. States are compared to the search string
    via a case-insensitive containment test, a.k.a. a partial match.

    Args:
        search_str: The string to compare state names against. If
            blank, all states will be returned.
        max_count (int): The maximum number of states to return.

    Returns:
        A serialized list of states matching filter, if any. A list
        of all states if no filter given.
    """
    try:
        if search_str:
            states = State.objects.filter(name__icontains=search_str)
        else:
            states = State.objects.all()

        if max_count > 0:
            return list(map(state_to_dict, states))[:max_count]
        elif max_count < 0:
            return list(map(state_to_dict, states))[max_count:]
        else:
            return list(map(state_to_dict, states))
    except State.DoesNotExist:
        return []
Example #3
def person_list(search_str=None, max_count=0):
    """List persons matching a given name or email filter.

    Filter persons by name and email. Persons are compared to the
    search string via a case-insensitive containment test, a.k.a. a
    partial match.

    Args:
        search_str: The string to compare person names or emails
            against. If blank, all persons will be returned.
        max_count (int): The maximum number of persons to return.

    Returns:
        A serialized list of persons matching filter, if any. A list
        of all persons if no filter given.
    """
    try:
        if search_str:
            people = (Person.objects.filter(name__icontains=search_str) |
                      Person.objects.filter(email__icontains=search_str))
        else:
            people = Person.objects.all()

        if max_count > 0:
            return list(map(person_to_dict, people))[:max_count]
        elif max_count < 0:
            return list(map(person_to_dict, people))[max_count:]
        else:
            return list(map(person_to_dict, people))
    except Person.DoesNotExist:
        return []
Example #4
def output_json(out, code=200):
    if code != 200:
        out['code'] = code
    indent = None
    if settings.DEBUG:
        if isinstance(out, dict):
            out['debug_db_queries'] = connection.queries
        indent = 4
    encoder = GEOS_JSONEncoder(ensure_ascii=False, indent=indent)
    content = encoder.iterencode(out)
    content = map(smart_bytes, content)  # Workaround Django bug #24240

    types = {
        400: http.HttpResponseBadRequest,
        404: http.HttpResponseNotFound,
        500: http.HttpResponseServerError,
    }
    if django.get_version() >= '1.5':
        response_type = types.get(code, http.StreamingHttpResponse)
    else:
        response_type = types.get(code, http.HttpResponse)
        # Django 1.4 middleware messes up iterable content
        content = list(content)

    response = response_type(content_type='application/json; charset=utf-8')
    response['Access-Control-Allow-Origin'] = '*'
    response['Cache-Control'] = 'max-age=2419200'  # 4 weeks
    attr = 'streaming_content' if getattr(response, 'streaming', None) else 'content'
    setattr(response, attr, content)
    return response
Example #5
def output_json(out, code=200):
    if code != 200:
        out['code'] = code
    indent = None
    if settings.DEBUG:
        if isinstance(out, dict):
            out['debug_db_queries'] = connection.queries
        indent = 4
    encoder = GEOS_JSONEncoder(ensure_ascii=False, indent=indent)
    content = encoder.iterencode(out)

    # We don't want a generator function (iterencode) to be passed to an
    # HttpResponse, as it won't cache due to its close() function adding it to
    # an instance attribute.
    content = map(lambda x: x, content)

    types = {
        400: http.HttpResponseBadRequest,
        404: http.HttpResponseNotFound,
        500: http.HttpResponseServerError,
    }
    response_type = types.get(code, http.StreamingHttpResponse)

    response = response_type(content_type='application/json; charset=utf-8')
    response['Access-Control-Allow-Origin'] = '*'
    response['Cache-Control'] = 'max-age=2419200'  # 4 weeks
    attr = 'streaming_content' if getattr(response, 'streaming', None) else 'content'
    setattr(response, attr, content)
    return response
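
The map(lambda x: x, content) line above looks redundant but is deliberate: iterencode() returns a generator, and the comment in the code explains why a generator should not reach the response object directly. Wrapping it in map() produces an equivalent iterator that is not a generator. A standalone check of that distinction:

import json
import types

gen = json.JSONEncoder().iterencode({'answer': 42})
print(isinstance(gen, types.GeneratorType))                    # True
print(isinstance(map(lambda x: x, gen), types.GeneratorType))  # False -- plain iterator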
Example #6
 def thumbs(self):
     thumb_ids = filter(None, self.request.GET.get('thumbs', '').split(','))
     try:
         thumb_ids = map(int, thumb_ids)
     except TypeError:
         thumbs = Thumb.objects.none()
     else:
         thumbs = Thumb.objects.filter(pk__in=thumb_ids)
     thumb_dict = dict([(t.name, t) for t in thumbs])
     ordered_thumbs = [
         thumb_dict.get(s.name, Thumb(name=s.name)) for s in self.sizes if not s.is_alias]
     return FakeQuerySet(ordered_thumbs, thumbs)
Example #7
def output_html(request, title, areas, **kwargs):
    kwargs['json_url'] = request.get_full_path().replace('.html', '')
    kwargs['title'] = title
    tpl = render_to_string('mapit/data.html', kwargs, request=request)
    wraps = tpl.split('!!!DATA!!!')

    indent_areas = kwargs.get('indent_areas', False)
    item_tpl = loader.get_template('mapit/areas_item.html')
    areas = map(lambda area: item_tpl.render(Context({'area': area, 'indent_areas': indent_areas})), areas)
    areas = defaultiter(areas, '<li>' + _('No matching areas found.') + '</li>')
    content = itertools.chain(wraps[0:1], areas, wraps[1:])
    content = map(smart_bytes, content)  # Workaround Django bug #24240

    if django.get_version() >= '1.5':
        response_type = http.StreamingHttpResponse
    else:
        response_type = http.HttpResponse
        # Django 1.4 middleware messes up iterable content
        content = list(content)

    return response_type(content)
Example #8
def output_html(request, title, areas, **kwargs):
    kwargs['json_url'] = request.get_full_path().replace('.html', '')
    kwargs['title'] = title
    tpl = loader.render_to_string('mapit/data.html', kwargs, request=request)
    wraps = tpl.split('!!!DATA!!!')

    indent_areas = kwargs.get('indent_areas', False)
    item_tpl = loader.get_template('mapit/areas_item.html')
    areas = map(lambda area: item_tpl.render({'area': area, 'indent_areas': indent_areas}), areas)
    areas = defaultiter(areas, '<li>' + _('No matching areas found.') + '</li>')
    content = itertools.chain(wraps[0:1], areas, wraps[1:])

    return http.StreamingHttpResponse(content)
Example #9
def clean_header(header):
    """Decode (possibly non-ascii) headers."""
    def decode(fragment):
        (frag_str, frag_encoding) = fragment
        if frag_encoding:
            return frag_str.decode(frag_encoding)
        elif isinstance(frag_str, six.binary_type):  # python 2
            return frag_str.decode()
        return frag_str

    fragments = list(map(decode, decode_header(header)))

    return normalise_space(u' '.join(fragments))
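
clean_header() is a thin wrapper around the standard library's email.header.decode_header(), which splits a header into (fragment, charset) pairs. A standalone look at what decode_header() hands back (normalise_space() is the project's own whitespace helper and is not shown here):

from email.header import decode_header

fragments = decode_header('=?utf-8?q?caf=C3=A9?= owner')
print(fragments)
# Something like [(b'caf\xc3\xa9', 'utf-8'), (b' owner', None)] -- clean_header()
# decodes each fragment and joins the pieces into a single unicode string.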
Example #10
def render_stacktrace(trace):
    stacktrace = []
    for frame in trace:
        params = map(escape, frame[0].rsplit(os.path.sep, 1) + list(frame[1:]))
        params_dict = dict((text_type(idx), v) for idx, v in enumerate(params))
        try:
            stacktrace.append('<span class="path">%(0)s/</span>'
                              '<span class="file">%(1)s</span>'
                              ' in <span class="func">%(3)s</span>'
                              '(<span class="lineno">%(2)s</span>)\n'
                              '  <span class="code">%(4)s</span>'
                              % params_dict)
        except KeyError:
            # This frame doesn't have the expected format, so skip it and move on to the next one
            continue
    return mark_safe('\n'.join(stacktrace))
Example #11
def hash_patch(str):
    # normalise spaces
    str = str.replace('\r', '')
    str = str.strip() + '\n'

    prefixes = ['-', '+', ' ']
    hash = hashlib.sha1()

    for line in str.split('\n'):

        if len(line) <= 0:
            continue

        hunk_match = _hunk_re.match(line)
        filename_match = _filename_re.match(line)

        if filename_match:
            # normalise -p1 top-directories
            if filename_match.group(1) == '---':
                filename = 'a/'
            else:
                filename = 'b/'
            filename += '/'.join(filename_match.group(2).split('/')[1:])

            line = filename_match.group(1) + ' ' + filename

        elif hunk_match:
            # remove line numbers, but leave line counts
            def fn(x):
                if not x:
                    return 1
                return int(x)
            line_nos = list(map(fn, hunk_match.groups()))
            line = '@@ -%d +%d @@' % tuple(line_nos)

        elif line[0] in prefixes:
            # if we have a +, - or context line, leave as-is
            pass

        else:
            # other lines are ignored
            continue

        hash.update((line + '\n').encode('utf-8'))

    return hash
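
hash_patch() relies on two module-level regexes that are not part of this snippet. A rough sketch of what they could look like, inferred only from how their groups are used above (the real definitions may differ):

import re

# Assumed shapes: _hunk_re must expose the two optional line counts as groups
# (see the fn()/line_nos handling above), and _filename_re must expose the
# ---/+++ marker and the path that follows it.
_hunk_re = re.compile(r'^\@\@ -\d+(?:,(\d+))? \+\d+(?:,(\d+))? \@\@')
_filename_re = re.compile(r'^(---|\+\+\+) (\S+)')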
Example #12
    def post(self, request, *args, **kwargs):
        # Dynamic dispatch pattern - we forward POST requests onto a method
        # designated by the 'action' parameter.  The action has to be in a
        # whitelist to avoid security issues.
        action = request.POST.get(self.action_param, "").lower()
        if not self.actions or action not in self.actions:
            messages.error(self.request, _("Invalid action"))
            return redirect(self.get_error_url(request))

        ids = request.POST.getlist("selected_%s" % self.get_checkbox_object_name())
        ids = list(map(int, ids))
        if not ids:
            messages.error(self.request, _("You need to select some %ss") % self.get_checkbox_object_name())
            return redirect(self.get_error_url(request))

        objects = self.get_objects(ids)
        return getattr(self, action)(request, objects)
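
The whitelist-dispatch idea described in the comment above, reduced to a self-contained toy (class and action names are illustrative, not the real API):

class BulkActionView:
    actions = ('archive', 'unarchive')

    def dispatch_action(self, action, objects):
        # Only whitelisted method names may be reached via getattr().
        if action not in self.actions:
            raise ValueError('Invalid action: %s' % action)
        return getattr(self, action)(objects)

    def archive(self, objects):
        return ['archived %s' % obj for obj in objects]

    def unarchive(self, objects):
        return ['unarchived %s' % obj for obj in objects]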
Example #13
 def format_value(self, value):
     if value:
         return ','.join(map(six.text_type, filter(bool, value)))
     else:
         return ''
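
The join/filter/map combination above, shown standalone with the builtin str standing in for six.text_type:

value = [3, None, 7, 0, 12]
print(','.join(map(str, filter(bool, value))))  # '3,7,12' -- note that falsy 0 is dropped too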
Example #14
    def execute(self, sql, params=()):
        start = datetime.now()
        try:
            return self.cursor.execute(sql, params)
        finally:
            # FIXME: Sometimes connections which are not in the connections
            # dict are used (for example in test database destroying).
            # The code below (at least get_transaction_id(alias) needs to have
            # the connection in the connections dict. It would be good to
            # not have this requirement at all, but for now lets just skip
            # these connections.
            if self.db.alias not in connections:
                return
            stop = datetime.now()
            duration = ms_from_timedelta(stop - start)
            enable_stacktraces = getattr(settings,
                'DEBUG_TOOLBAR_CONFIG', {}).get('ENABLE_STACKTRACES', True)
            if enable_stacktraces:
                stacktrace = tidy_stacktrace(reversed(get_stack()))
            else:
                stacktrace = []
            _params = ''
            try:
                _params = json.dumps(list(map(self._decode, params)))
            except Exception:
                pass  # object not JSON serializable

            template_info = None
            cur_frame = sys._getframe().f_back
            try:
                while cur_frame is not None:
                    if cur_frame.f_code.co_name == 'render':
                        node = cur_frame.f_locals['self']
                        if isinstance(node, Node):
                            template_info = get_template_info(node.source)
                            break
                    cur_frame = cur_frame.f_back
            except:
                pass
            del cur_frame

            alias = getattr(self.db, 'alias', 'default')
            conn = self.db.connection
            # HACK: avoid imports
            if conn:
                engine = conn.__class__.__module__.split('.', 1)[0]
            else:
                engine = 'unknown'

            params = {
                'engine': engine,
                'alias': alias,
                'sql': self.db.ops.last_executed_query(self.cursor, sql,
                                                self._quote_params(params)),
                'duration': duration,
                'raw_sql': sql,
                'params': _params,
                'hash': sha1(smart_bytes(settings.SECRET_KEY) \
                                        + smart_bytes(sql) \
                                        + smart_bytes(_params)).hexdigest(),
                'stacktrace': stacktrace,
                'start_time': start,
                'stop_time': stop,
                'is_slow': (duration > SQL_WARNING_THRESHOLD),
                'is_select': sql.lower().strip().startswith('select'),
                'template_info': template_info,
            }

            if engine == 'psycopg2':
                # If an erroneous query was run on the connection, it might
                # be in a state where checking isolation_level raises an
                # exception.
                try:
                    iso_level = conn.isolation_level
                except conn.InternalError:
                    iso_level = 'unknown'
                params.update({
                    'trans_id': self.logger.get_transaction_id(alias),
                    'trans_status': conn.get_transaction_status(),
                    'iso_level': iso_level,
                    'encoding': conn.encoding,
                })

            # We keep `sql` to maintain backwards compatibility
            self.logger.record(**params)
Example #15
 def streaming_content(self):
     return map(self.make_bytes, self._iterator)
Example #16
def iterload_objects(import_paths):
    """
    Load a list of objects.
    """
    return map(load_object, import_paths)
Example #17
 def format_value(self, value):
     if value:
         return ','.join(map(six.text_type, filter(bool, value)))
     else:
         return ''
Example #18
 def _quote_params(self, params):
     if isinstance(params, dict):
         return dict((key, self._quote_expr(value))
                         for key, value in iteritems(params))
     return list(map(self._quote_expr, params))
Example #19
def facet_data(request, form, results):  # noqa (too complex (10))
    """
    Convert Haystack's facet data into a more useful datastructure that
    templates can use without having to manually construct URLs
    """
    facet_data = {}
    if not results:
        return facet_data

    base_url = URL(request.get_full_path())
    facet_counts = results.facet_counts()

    # Field facets
    valid_facets = [f for f in form.selected_facets if ':' in f]
    selected = dict(map(lambda x: x.split(':', 1), valid_facets))
    for key, facet in settings.OSCAR_SEARCH_FACETS['fields'].items():
        facet_data[key] = {'name': facet['name'], 'results': []}
        for name, count in facet_counts['fields'][key]:
            # Ignore zero-count facets for field
            if count == 0:
                continue
            field_filter = '%s_exact' % facet['field']
            datum = {'name': name, 'count': count}
            if selected.get(field_filter, None) == name:
                # This filter is selected - build the 'deselect' URL
                datum['selected'] = True
                url = base_url.remove_query_param(
                    'selected_facets', '%s:%s' % (field_filter, name))
                # Don't carry through pagination params
                if url.has_query_param('page'):
                    url = url.remove_query_param('page')
                datum['deselect_url'] = url.as_string()
            else:
                # This filter is not selected - build the 'select' URL
                datum['selected'] = False
                url = base_url.append_query_param(
                    'selected_facets', '%s:%s' % (field_filter, name))
                # Don't carry through pagination params
                if url.has_query_param('page'):
                    url = url.remove_query_param('page')
                datum['select_url'] = url.as_string()
            facet_data[key]['results'].append(datum)

    # Query facets
    for key, facet in settings.OSCAR_SEARCH_FACETS['queries'].items():
        facet_data[key] = {'name': facet['name'], 'results': []}
        for name, query in facet['queries']:
            field_filter = '%s_exact' % facet['field']
            match = '%s_exact:%s' % (facet['field'], query)
            if match not in facet_counts['queries']:
                datum = {
                    'name': name,
                    'count': 0,
                }
            else:
                datum = {
                    'name': name,
                    'count': facet_counts['queries'][match],
                }
                if selected.get(field_filter, None) == query:
                    # Selected
                    datum['selected'] = True
                    url = base_url.remove_query_param('selected_facets', match)
                    datum['deselect_url'] = url.as_string()
                else:
                    datum['selected'] = False
                    url = base_url.append_query_param('selected_facets', match)
                    datum['select_url'] = url.as_string()

            facet_data[key]['results'].append(datum)

    return facet_data
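
The selected-facets parsing at the top of this function is easier to see in isolation; a standalone illustration (the facet values are invented):

valid_facets = ['rating_exact:4', 'price_exact:[0 TO 20]']
selected = dict(map(lambda x: x.split(':', 1), valid_facets))
print(selected)  # {'rating_exact': '4', 'price_exact': '[0 TO 20]'}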
Example #20
    def execute(self, sql, params=()):
        start = datetime.now()
        try:
            return self.cursor.execute(sql, params)
        finally:
            # FIXME: Sometimes connections which are not in the connections
            # dict are used (for example in test database destroying).
            # The code below (at least get_transaction_id(alias) needs to have
            # the connection in the connections dict. It would be good to
            # not have this requirement at all, but for now lets just skip
            # these connections.
            if self.db.alias not in connections:
                return
            stop = datetime.now()
            duration = ms_from_timedelta(stop - start)
            enable_stacktraces = getattr(settings, 'DEBUG_TOOLBAR_CONFIG',
                                         {}).get('ENABLE_STACKTRACES', True)
            if enable_stacktraces:
                stacktrace = tidy_stacktrace(reversed(get_stack()))
            else:
                stacktrace = []
            _params = ''
            try:
                _params = json.dumps(list(map(self._decode, params)))
            except Exception:
                pass  # object not JSON serializable

            template_info = None
            cur_frame = sys._getframe().f_back
            try:
                while cur_frame is not None:
                    if cur_frame.f_code.co_name == 'render':
                        node = cur_frame.f_locals['self']
                        if isinstance(node, Node):
                            template_info = get_template_info(node.source)
                            break
                    cur_frame = cur_frame.f_back
            except:
                pass
            del cur_frame

            alias = getattr(self.db, 'alias', 'default')
            conn = self.db.connection
            # HACK: avoid imports
            if conn:
                engine = conn.__class__.__module__.split('.', 1)[0]
            else:
                engine = 'unknown'

            params = {
                'engine': engine,
                'alias': alias,
                'sql': self.db.ops.last_executed_query(self.cursor, sql,
                                                self._quote_params(params)),
                'duration': duration,
                'raw_sql': sql,
                'params': _params,
                'hash': sha1(smart_bytes(settings.SECRET_KEY) \
                                        + smart_bytes(sql) \
                                        + smart_bytes(_params)).hexdigest(),
                'stacktrace': stacktrace,
                'start_time': start,
                'stop_time': stop,
                'is_slow': (duration > SQL_WARNING_THRESHOLD),
                'is_select': sql.lower().strip().startswith('select'),
                'template_info': template_info,
            }

            if engine == 'psycopg2':
                # If an erroneous query was run on the connection, it might
                # be in a state where checking isolation_level raises an
                # exception.
                try:
                    iso_level = conn.isolation_level
                except conn.InternalError:
                    iso_level = 'unknown'
                params.update({
                    'trans_id': self.logger.get_transaction_id(alias),
                    'trans_status': conn.get_transaction_status(),
                    'iso_level': iso_level,
                    'encoding': conn.encoding,
                })

            # We keep `sql` to maintain backwards compatibility
            self.logger.record(**params)
Example #21
DINERS_CLUB = 'Diners Club'
CHINA_UNIONPAY = 'China UnionPay'
JCB = 'JCB'
LASER = 'Laser'
SOLO = 'Solo'
SWITCH = 'Switch'

# List of (type, lengths, prefixes) tuples
# See http://en.wikipedia.org/wiki/Bank_card_number
CARD_TYPES = [
    (AMEX, (15,), ('34', '37')),
    (CHINA_UNIONPAY, (16, 17, 18, 19), ('62', '88')),
    (DINERS_CLUB, (14,), ('300', '301', '302', '303', '304', '305')),
    (DINERS_CLUB, (14,), ('36',)),
    (DISCOVER, (16,),
     list(map(str, list(range(622126, 622926)))) +
     list(map(str, list(range(644, 650)))) + ['6011', '65']),
    (JCB, (16,), list(map(str, list(range(3528, 3590))))),
    (LASER, list(range(16, 20)), ('6304', '6706', '6771', '6709')),
    (MAESTRO, list(range(12, 20)), ('5018', '5020', '5038', '5893', '6304',
                                    '6759', '6761', '6762', '6763', '0604')),
    (MASTERCARD, (16,), list(map(str, list(range(51, 56))))),
    # Diners Club cards match the same pattern as Mastercard.  They are treated
    # as Mastercard normally so we put the mastercard pattern first.
    (DINERS_CLUB, (16,), ('54', '55')),
    (SOLO, list(range(16, 20)), ('6334', '6767')),
    (SWITCH, list(range(16, 20)), ('4903', '4905', '4911', '4936',
                                   '564182', '633110', '6333', '6759')),
    (VISA, (13, 16), ('4',)),
    (VISA_ELECTRON, (16,), ('4026', '417500', '4405', '4508',
                            '4844', '4913', '4917')),
]
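
A hypothetical lookup helper, not part of the snippet above, showing how the (type, lengths, prefixes) tuples might be used; VISA and the other constants are assumed to be plain display strings defined elsewhere in the module:

def card_type(number, card_types):
    # The first matching entry wins, which is why the Mastercard pattern is
    # listed before the 16-digit Diners Club prefixes above.
    for name, lengths, prefixes in card_types:
        if len(number) in lengths and number.startswith(tuple(prefixes)):
            return name
    return None

# card_type('4111111111111111', CARD_TYPES) would return the VISA constant.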
Example #22
 def setUp(self):
     self.people = [
         Person(name="Test Name", email="*****@*****.**"),
         Person(email="*****@*****.**"),
     ]
     list(map(lambda p: p.save(), self.people))
Example #23
def parse_patch(text):
    patchbuf = ''
    commentbuf = ''
    buf = ''

    # state specifies the line we just saw, and what to expect next
    state = 0
    # 0: text
    # 1: suspected patch header (diff, ====, Index:)
    # 2: patch header line 1 (---)
    # 3: patch header line 2 (+++)
    # 4: patch hunk header line (@@ line)
    # 5: patch hunk content
    # 6: patch meta header (rename from/rename to)
    #
    # valid transitions:
    #  0 -> 1 (diff, ===, Index:)
    #  0 -> 2 (---)
    #  1 -> 2 (---)
    #  2 -> 3 (+++)
    #  3 -> 4 (@@ line)
    #  4 -> 5 (patch content)
    #  5 -> 1 (run out of lines from @@-specified count)
    #  1 -> 6 (rename from / rename to)
    #  6 -> 2 (---)
    #  6 -> 1 (other text)
    #
    # Suspected patch header is stored into buf, and appended to
    # patchbuf if we find a following hunk. Otherwise, append to
    # comment after parsing.

    # line counts while parsing a patch hunk
    lc = (0, 0)
    hunk = 0

    for line in text.split('\n'):
        line += '\n'

        if state == 0:
            if line.startswith('diff ') or line.startswith('===') \
                    or line.startswith('Index: '):
                state = 1
                buf += line

            elif line.startswith('--- '):
                state = 2
                buf += line

            else:
                commentbuf += line

        elif state == 1:
            buf += line
            if line.startswith('--- '):
                state = 2

            if line.startswith(('rename from ', 'rename to ')):
                state = 6

        elif state == 2:
            if line.startswith('+++ '):
                state = 3
                buf += line

            elif hunk:
                state = 1
                buf += line

            else:
                state = 0
                commentbuf += buf + line
                buf = ''

        elif state == 3:
            match = _hunk_re.match(line)
            if match:

                def fn(x):
                    if not x:
                        return 1
                    return int(x)

                lc = list(map(fn, match.groups()))

                state = 4
                patchbuf += buf + line
                buf = ''

            elif line.startswith('--- '):
                patchbuf += buf + line
                buf = ''
                state = 2

            elif hunk and line.startswith('\\ No newline at end of file'):
                # If we had a hunk and now we see this, it's part of the patch,
                # and we're still expecting another @@ line.
                patchbuf += line

            elif hunk:
                state = 1
                buf += line

            else:
                state = 0
                commentbuf += buf + line
                buf = ''

        elif state == 4 or state == 5:
            if line.startswith('-'):
                lc[0] -= 1
            elif line.startswith('+'):
                lc[1] -= 1
            elif line.startswith('\\ No newline at end of file'):
                # Special case: Not included as part of the hunk's line count
                pass
            else:
                lc[0] -= 1
                lc[1] -= 1

            patchbuf += line

            if lc[0] <= 0 and lc[1] <= 0:
                state = 3
                hunk += 1
            else:
                state = 5

        elif state == 6:
            if line.startswith(('rename to ', 'rename from ')):
                patchbuf += buf + line
                buf = ''

            elif line.startswith('--- '):
                patchbuf += buf + line
                buf = ''
                state = 2

            else:
                buf += line
                state = 1

        else:
            raise Exception("Unknown state %d! (line '%s')" % (state, line))

    commentbuf += buf

    if patchbuf == '':
        patchbuf = None

    if commentbuf == '':
        commentbuf = None

    return (patchbuf, commentbuf)
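
A rough, self-contained trace of how parse_patch() splits a mail body into its two parts (it depends on the module-level _hunk_re regex, which is not part of this snippet; the diff below is made up):

mail_body = """Fix the frobnicator.

--- a/foo.c
+++ b/foo.c
@@ -1,2 +1,2 @@
-old line
+new line
 context
"""

patch, comment = parse_patch(mail_body)
# patch holds the ---/+++/@@ block; comment holds the prose above it.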
Example #24
 def _quote_params(self, params):
     if isinstance(params, dict):
         return dict((key, self._quote_expr(value))
                     for key, value in iteritems(params))
     return list(map(self._quote_expr, params))
Example #25
def facet_data(request, form, results):  # noqa (too complex (10))
    """
    Convert Haystack's facet data into a more useful datastructure that
    templates can use without having to manually construct URLs
    """
    facet_data = {}
    if not results:
        return facet_data

    base_url = URL(request.get_full_path())
    facet_counts = results.facet_counts()

    # Field facets
    valid_facets = [f for f in form.selected_facets if ':' in f]
    selected = dict(
        map(lambda x: x.split(':', 1), valid_facets))
    for key, facet in settings.OSCAR_SEARCH_FACETS['fields'].items():
        facet_data[key] = {
            'name': facet['name'],
            'results': []}
        for name, count in facet_counts['fields'][key]:
            # Ignore zero-count facets for field
            if count == 0:
                continue
            field_filter = '%s_exact' % facet['field']
            datum = {
                'name': name,
                'count': count}
            if selected.get(field_filter, None) == name:
                # This filter is selected - build the 'deselect' URL
                datum['selected'] = True
                url = base_url.remove_query_param(
                    'selected_facets', '%s:%s' % (
                        field_filter, name))
                # Don't carry through pagination params
                if url.has_query_param('page'):
                    url = url.remove_query_param('page')
                datum['deselect_url'] = url.as_string()
            else:
                # This filter is not selected - build the 'select' URL
                datum['selected'] = False
                url = base_url.append_query_param(
                    'selected_facets', '%s:%s' % (
                        field_filter, name))
                # Don't carry through pagination params
                if url.has_query_param('page'):
                    url = url.remove_query_param('page')
                datum['select_url'] = url.as_string()
            facet_data[key]['results'].append(datum)

    # Query facets
    for key, facet in settings.OSCAR_SEARCH_FACETS['queries'].items():
        facet_data[key] = {
            'name': facet['name'],
            'results': []}
        for name, query in facet['queries']:
            field_filter = '%s_exact' % facet['field']
            match = '%s_exact:%s' % (facet['field'], query)
            if match not in facet_counts['queries']:
                datum = {
                    'name': name,
                    'count': 0,
                }
            else:
                datum = {
                    'name': name,
                    'count': facet_counts['queries'][match],
                }
                if selected.get(field_filter, None) == query:
                    # Selected
                    datum['selected'] = True
                    url = base_url.remove_query_param(
                        'selected_facets', match)
                    datum['deselect_url'] = url.as_string()
                else:
                    datum['selected'] = False
                    url = base_url.append_query_param(
                        'selected_facets', match)
                    datum['select_url'] = url.as_string()

            facet_data[key]['results'].append(datum)

    return facet_data
Example #26
def patch_list(filt=None):
    """List patches matching all of a given set of filters.

    Filter patches by one or more of the below fields:

     * id
     * name
     * project_id
     * submitter_id
     * delegate_id
     * archived
     * state_id
     * date
     * commit_ref
     * hash
     * msgid

    It is also possible to specify the number of patches returned via
    a ``max_count`` filter.

     * max_count

    With the exception of ``max_count``, the specified fields of the
    patches are compared to the search string using a provided
    field lookup type, which can be one of:

     * iexact
     * contains
     * icontains
     * gt
     * gte
     * lt
     * in
     * startswith
     * istartswith
     * endswith
     * iendswith
     * range
     * year
     * month
     * day
     * isnull

    Please refer to the Django documentation for more information on
    these field lookup types.

    An example filter would look like so:

    {
        'name__icontains': 'Joe Bloggs',
        'max_count': 1,
    }

    Args:
        filt (dict): The filters specifying the field to compare, the
            lookup type and the value to compare against. Keys are of
            format ``[FIELD_NAME]`` or ``[FIELD_NAME]__[LOOKUP_TYPE]``.
            Example: ``name__icontains``. Values are plain strings to
            compare against.

    Returns:
        A serialized list of patches matching filters, if any. A list
        of all patches if no filter given.
    """
    if filt is None:
        filt = {}

    try:
        # We allow access to many of the fields.  But, some fields are
        # filtered by raw object so we must lookup by ID instead over
        # XML-RPC.
        ok_fields = [
            'id',
            'name',
            'project_id',
            'submitter_id',
            'delegate_id',
            'archived',
            'state_id',
            'date',
            'commit_ref',
            'hash',
            'msgid',
            'max_count',
        ]

        dfilter = {}
        max_count = 0

        for key in filt:
            parts = key.split('__')
            if parts[0] not in ok_fields:
                # Invalid field given
                return []
            if len(parts) > 1:
                if LOOKUP_TYPES.count(parts[1]) == 0:
                    # Invalid lookup type given
                    return []

            if parts[0] == 'project_id':
                dfilter['project'] = Project.objects.filter(id=filt[key])[0]
            elif parts[0] == 'submitter_id':
                dfilter['submitter'] = Person.objects.filter(id=filt[key])[0]
            elif parts[0] == 'delegate_id':
                dfilter['delegate'] = Person.objects.filter(id=filt[key])[0]
            elif parts[0] == 'state_id':
                dfilter['state'] = State.objects.filter(id=filt[key])[0]
            elif parts[0] == 'max_count':
                max_count = filt[key]
            else:
                dfilter[key] = filt[key]

        patches = Patch.objects.filter(**dfilter)

        if max_count > 0:
            return list(map(patch_to_dict, patches[:max_count]))
        elif max_count < 0:
            query = patches.reverse()[:-max_count]
            return [patch_to_dict(patch) for patch in reversed(query)]
        else:
            return list(map(patch_to_dict, patches))
    except Patch.DoesNotExist:
        return []
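
A hedged usage sketch (it needs a populated Patchwork database; the project id and search string are made up):

recent = patch_list({'project_id': 1, 'name__icontains': 'mm: fix', 'max_count': 10})
# At most ten serialized patches from project 1 whose names contain 'mm: fix'.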
Example #27
def check_list(filt=None):
    """List checks matching all of a given set of filters.

    Filter checks by one or more of the below fields:

     * id
     * user
     * project_id
     * patch_id

    It is also possible to specify the number of checks returned via
    a ``max_count`` filter.

     * max_count

    With the exception of ``max_count``, the specified fields of the
    checks are compared to the search string using a provided
    field lookup type, which can be one of:

     * iexact
     * contains
     * icontains
     * gt
     * gte
     * lt
     * in
     * startswith
     * istartswith
     * endswith
     * iendswith
     * range
     * year
     * month
     * day
     * isnull

    Please refer to the Django documentation for more information on
    these field lookup types.

    An example filter would look like so:

    {
        'user__icontains': 'Joe Bloggs',
        'max_count': 1,
    }

    Args:
        filt (dict): The filters specifying the field to compare, the
            lookup type and the value to compare against. Keys are of
            format ``[FIELD_NAME]`` or ``[FIELD_NAME]__[LOOKUP_TYPE]``.
            Example: ``name__icontains``. Values are plain strings to
            compare against.

    Returns:
        A serialized list of Checks matching filters, if any. A list
        of all Checks if no filter given.
    """
    if filt is None:
        filt = {}

    try:
        # We allow access to many of the fields. But, some fields are
        # filtered by raw object so we must lookup by ID instead over
        # XML-RPC.
        ok_fields = [
            'id',
            'user',
            'project_id',
            'patch_id',
            'max_count',
        ]

        dfilter = {}
        max_count = 0

        for key in filt:
            parts = key.split('__')
            if parts[0] not in ok_fields:
                # Invalid field given
                return []
            if len(parts) > 1:
                if LOOKUP_TYPES.count(parts[1]) == 0:
                    # Invalid lookup type given
                    return []

            if parts[0] == 'user_id':
                dfilter['user'] = Person.objects.filter(id=filt[key])[0]
            if parts[0] == 'project_id':
                dfilter['patch__project'] = Project.objects.filter(
                    id=filt[key])[0]
            elif parts[0] == 'patch_id':
                dfilter['patch'] = Patch.objects.filter(id=filt[key])[0]
            elif parts[0] == 'max_count':
                max_count = filt[key]
            else:
                dfilter[key] = filt[key]

        checks = Check.objects.filter(**dfilter)

        if max_count > 0:
            return list(map(check_to_dict, checks[:max_count]))
        else:
            return list(map(check_to_dict, checks))
    except Check.DoesNotExist:
        return []
Example #28
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.six.moves import map

register = template.Library()


def _compile(t):
    (r, str) = t
    return (re.compile(r, re.M | re.I), str)


_patch_span_res = list(
    map(_compile, [
        ('^(Index:?|diff|\-\-\-|\+\+\+|\*\*\*) .*$', 'p_header'),
        ('^\+.*$', 'p_add'),
        ('^-.*$', 'p_del'),
        ('^!.*$', 'p_mod'),
    ]))

_patch_chunk_re = \
    re.compile('^(@@ \-\d+(?:,\d+)? \+\d+(?:,\d+)? @@)(.*)$', re.M | re.I)

_comment_span_res = list(
    map(_compile, [
        ('^\s*Signed-off-by: .*$', 'signed-off-by'),
        ('^\s*Acked-by: .*$', 'acked-by'),
        ('^\s*Nacked-by: .*$', 'nacked-by'),
        ('^\s*Tested-by: .*$', 'tested-by'),
        ('^\s*Reviewed-by: .*$', 'reviewed-by'),
        ('^\s*From: .*$', 'from'),
        ('^\s*&gt;.*$', 'quote'),
Example #29
def parse_patch(text):
    patchbuf = ''
    commentbuf = ''
    buf = ''

    # state specifies the line we just saw, and what to expect next
    state = 0
    # 0: text
    # 1: suspected patch header (diff, ====, Index:)
    # 2: patch header line 1 (---)
    # 3: patch header line 2 (+++)
    # 4: patch hunk header line (@@ line)
    # 5: patch hunk content
    # 6: patch meta header (rename from/rename to)
    #
    # valid transitions:
    #  0 -> 1 (diff, ===, Index:)
    #  0 -> 2 (---)
    #  1 -> 2 (---)
    #  2 -> 3 (+++)
    #  3 -> 4 (@@ line)
    #  4 -> 5 (patch content)
    #  5 -> 1 (run out of lines from @@-specified count)
    #  1 -> 6 (rename from / rename to)
    #  6 -> 2 (---)
    #  6 -> 1 (other text)
    #
    # Suspected patch header is stored into buf, and appended to
    # patchbuf if we find a following hunk. Otherwise, append to
    # comment after parsing.

    # line counts while parsing a patch hunk
    lc = (0, 0)
    hunk = 0

    for line in text.split('\n'):
        line += '\n'

        if state == 0:
            if line.startswith('diff ') or line.startswith('===') \
                    or line.startswith('Index: '):
                state = 1
                buf += line

            elif line.startswith('--- '):
                state = 2
                buf += line

            else:
                commentbuf += line

        elif state == 1:
            buf += line
            if line.startswith('--- '):
                state = 2

            if line.startswith(('rename from ', 'rename to ')):
                state = 6

        elif state == 2:
            if line.startswith('+++ '):
                state = 3
                buf += line

            elif hunk:
                state = 1
                buf += line

            else:
                state = 0
                commentbuf += buf + line
                buf = ''

        elif state == 3:
            match = _hunk_re.match(line)
            if match:

                def fn(x):
                    if not x:
                        return 1
                    return int(x)

                lc = list(map(fn, match.groups()))

                state = 4
                patchbuf += buf + line
                buf = ''

            elif line.startswith('--- '):
                patchbuf += buf + line
                buf = ''
                state = 2

            elif hunk and line.startswith('\ No newline at end of file'):
                # If we had a hunk and now we see this, it's part of the patch,
                # and we're still expecting another @@ line.
                patchbuf += line

            elif hunk:
                state = 1
                buf += line

            else:
                state = 0
                commentbuf += buf + line
                buf = ''

        elif state == 4 or state == 5:
            if line.startswith('-'):
                lc[0] -= 1
            elif line.startswith('+'):
                lc[1] -= 1
            elif line.startswith('\ No newline at end of file'):
                # Special case: Not included as part of the hunk's line count
                pass
            else:
                lc[0] -= 1
                lc[1] -= 1

            patchbuf += line

            if lc[0] <= 0 and lc[1] <= 0:
                state = 3
                hunk += 1
            else:
                state = 5

        elif state == 6:
            if line.startswith(('rename to ', 'rename from ')):
                patchbuf += buf + line
                buf = ''

            elif line.startswith('--- '):
                patchbuf += buf + line
                buf = ''
                state = 2

            else:
                buf += line
                state = 1

        else:
            raise Exception("Unknown state %d! (line '%s')" % (state, line))

    commentbuf += buf

    if patchbuf == '':
        patchbuf = None

    if commentbuf == '':
        commentbuf = None

    return (patchbuf, commentbuf)
Example #30
def get_subclasses(c):
    """
    Get all subclasses of a given class
    """
    return c.__subclasses__() + sum(map(get_subclasses, c.__subclasses__()), [])
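
A self-contained check of the recursion with toy classes:

class Base(object): pass
class Child(Base): pass
class GrandChild(Child): pass

print(get_subclasses(Base))  # [Child, GrandChild] -- direct children first, then their descendants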
Example #31
    def multiple_replace(dict, text):
        # Create a regular expression  from the dictionary keys
        regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))

        # For each match, look-up corresponding value in dictionary
        return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
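
A quick check of the swap behaviour (assuming multiple_replace is in scope):

print(multiple_replace({'cat': 'dog', 'dog': 'cat'}, 'cat chases dog'))
# -> 'dog chases cat' -- every key is replaced in a single pass over the text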
Example #32
from django import template
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.six.moves import map


register = template.Library()


def _compile(t):
    (r, str) = t
    return (re.compile(r, re.M | re.I), str)

_patch_span_res = list(map(_compile, [
    ('^(Index:?|diff|\-\-\-|\+\+\+|\*\*\*) .*$', 'p_header'),
    ('^\+.*$', 'p_add'),
    ('^-.*$', 'p_del'),
    ('^!.*$', 'p_mod'),
]))

_patch_chunk_re = \
    re.compile('^(@@ \-\d+(?:,\d+)? \+\d+(?:,\d+)? @@)(.*)$', re.M | re.I)

_comment_span_res = list(map(_compile, [
    ('^\s*Signed-off-by: .*$', 'signed-off-by'),
    ('^\s*Acked-by: .*$', 'acked-by'),
    ('^\s*Nacked-by: .*$', 'nacked-by'),
    ('^\s*Tested-by: .*$', 'tested-by'),
    ('^\s*Reviewed-by: .*$', 'reviewed-by'),
    ('^\s*From: .*$', 'from'),
    ('^\s*&gt;.*$', 'quote'),
]))
Example #33
        # If run with no args, try and run the testsuite as fast as possible.
        # That means across all cores and with no high-falutin' plugins.

        try:
            cpu_count = int(multiprocessing.cpu_count())
        except ValueError:
            cpu_count = 1

        args = [
            '--capture=no', '--nomigrations', '-n=%d' % cpu_count,
            'tests'
        ]
    else:
        # Some args/options specified.  Check to see if any nose options have
        # been specified.  If they have, then don't set any
        has_options = any(map(lambda x: x.startswith('--'), args))
        if not has_options:
            # Default options:
            # --exitfirst Abort on first error/failure
            # --capture=no Don't capture STDOUT
            args.extend(['--capture=no', '--nomigrations', '--exitfirst'])
        else:
            args = [arg for arg in args if not arg.startswith('-')]

    with warnings.catch_warnings():
        # The warnings module in default configuration will never cause tests
        # to fail, as it never raises an exception.  We alter that behaviour by
        # turning DeprecationWarnings into exceptions, but exclude warnings
        # triggered by third-party libs. Note: The context manager is not thread
        # safe. Behaviour with multiple threads is undefined.
        warnings.filterwarnings('error', category=DeprecationWarning)
Example #34
def get_subclasses(c):
    """
    Get all subclasses of a given class
    """
    return c.__subclasses__() + sum(map(get_subclasses, c.__subclasses__()),
                                    [])
Example #35
def iterload_objects(import_paths):
    """
    Load a list of objects.
    """
    return map(load_object, import_paths)
Example #36
 def streaming_content(self):
     return map(self.make_bytes, self._iterator)
Example #37
    verbosity = 1
    if not args:
        # If run with no args, try and run the testsuite as fast as possible.
        # That means across all cores and with no high-falutin' plugins.
        import multiprocessing

        try:
            num_cores = multiprocessing.cpu_count()
        except NotImplementedError:
            num_cores = 4  # Guess
        args = ["--nocapture", "--stop", "--processes=%s" % num_cores]
    else:
        # Some args/options specified.  Check to see if any nose options have
        # been specified.  If they have, then don't set any
        has_options = any(map(lambda x: x.startswith("--"), args))
        if not has_options:
            # Default options:
            # --stop Abort on first error/failure
            # --nocapture Don't capture STDOUT
            args.extend(["--nocapture", "--stop"])
        else:
            # Remove options as nose will pick these up from sys.argv
            for arg in args:
                if arg.startswith("--verbosity"):
                    verbosity = int(arg[-1])
            args = [arg for arg in args if not arg.startswith("-")]

    configure()
    with warnings.catch_warnings():
        # The warnings module in default configuration will never cause tests
Example #38
    verbosity = 1
    if not args:
        # If run with no args, try and run the testsuite as fast as possible.
        # That means across all cores and with no high-falutin' plugins.

        try:
            cpu_count = int(multiprocessing.cpu_count())
        except ValueError:
            cpu_count = 1

        args = ['--capture=no', '--nomigrations', '-n=%d' % cpu_count, 'tests']
    else:
        # Some args/options specified.  Check to see if any options have
        # been specified.  If they have, then don't set any
        has_options = any(map(lambda x: x.startswith('--'), args))
        if not has_options:
            # Default options:
            # --exitfirst Abort on first error/failure
            # --capture=no Don't capture STDOUT
            args.extend(['--capture=no', '--nomigrations', '--exitfirst'])
        else:
            args = [arg for arg in args if not arg.startswith('-')]

    with warnings.catch_warnings():
        # The warnings module in default configuration will never cause tests
        # to fail, as it never raises an exception.  We alter that behaviour by
        # turning DeprecationWarnings into exceptions, but exclude warnings
        # triggered by third-party libs. Note: The context manager is not
        # thread safe. Behaviour with multiple threads is undefined.
        warnings.filterwarnings('error', category=DeprecationWarning)
Example #39
DINERS_CLUB = 'Diners Club'
CHINA_UNIONPAY = 'China UnionPay'
JCB = 'JCB'
LASER = 'Laser'
SOLO = 'Solo'
SWITCH = 'Switch'

# List of (type, lengths, prefixes) tuples
# See http://en.wikipedia.org/wiki/Bank_card_number
CARD_TYPES = [
    (AMEX, (15,), ('34', '37')),
    (CHINA_UNIONPAY, (16, 17, 18, 19), ('62', '88')),
    (DINERS_CLUB, (14,), ('300', '301', '302', '303', '304', '305')),
    (DINERS_CLUB, (14,), ('36',)),
    (DISCOVER, (16,),
     list(map(str, list(range(622126, 622926)))) +
     list(map(str, list(range(644, 650)))) + ['6011', '65']),
    (JCB, (16,), list(map(str, list(range(3528, 3590))))),
    (LASER, list(range(16, 20)), ('6304', '6706', '6771', '6709')),
    (MAESTRO, list(range(12, 20)), ('5018', '5020', '5038', '5893', '6304',
                                    '6759', '6761', '6762', '6763', '0604')),
    (MASTERCARD, (16,), list(map(str, list(range(51, 56))))),
    # Diners Club cards match the same pattern as Mastercard.  They are treated
    # as Mastercard normally so we put the mastercard pattern first.
    (DINERS_CLUB, (16,), ('54', '55')),
    (SOLO, list(range(16, 20)), ('6334', '6767')),
    (SWITCH, list(range(16, 20)), ('4903', '4905', '4911', '4936',
                                   '564182', '633110', '6333', '6759')),
    (VISA, (13, 16), ('4',)),
    (VISA_ELECTRON, (16,), ('4026', '417500', '4405', '4508',
                            '4844', '4913', '4917')),
]
Example #40
 def setUp(self):
     self.people = [
         Person(name="Test Name", email="*****@*****.**"),
         Person(email="*****@*****.**"),
     ]
     list(map(lambda p: p.save(), self.people))
Example #41
    def multiple_replace(dict, text):
        # Create a regular expression  from the dictionary keys
        regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))

        # For each match, look-up corresponding value in dictionary
        return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
Example #42
def general(request):
    initial_data = _intial_settings_data()
    initial_data["storage_service_use_default_config"] = {"False": False}.get(
        initial_data.get("storage_service_use_default_config", True), True
    )
    general_form = GeneralSettingsForm(
        request.POST or None, prefix="general", initial=initial_data
    )
    storage_form = StorageSettingsForm(
        request.POST or None, prefix="storage", initial=initial_data
    )
    checksum_form = ChecksumSettingsForm(
        request.POST or None, prefix="checksum algorithm", initial=initial_data
    )

    forms = (general_form, storage_form, checksum_form)
    if all(map(lambda form: form.is_valid(), forms)):
        for item in forms:
            item.save()
        messages.info(request, _("Saved."))

    dashboard_uuid = helpers.get_setting("dashboard_uuid")

    not_created_yet = False
    try:
        pipeline = storage_service.get_pipeline(dashboard_uuid)
    except Exception as err:
        if err.response is not None and err.response.status_code == 404:
            # The server has returned a 404, we're going to assume that this is
            # the Storage Service telling us that the pipeline is unknown.
            not_created_yet = True
        else:
            messages.warning(
                request,
                _(
                    "Storage Service inaccessible. Please"
                    " contact an administrator or update"
                    " the Storage Service URL below."
                    "<hr />%(error)s" % {"error": err}
                ),
            )

    if not_created_yet:
        if storage_form.is_valid():
            try:
                setup_pipeline_in_ss(
                    storage_form.cleaned_data["storage_service_use_default_config"]
                )
            except Exception as err:
                messages.warning(
                    request,
                    _(
                        "Storage Service failed to create the"
                        " pipeline. This can happen if"
                        " the pipeline exists but it is"
                        " disabled. Please contact an"
                        " administrator."
                        "<hr />%(error)s" % {"error": err}
                    ),
                )
        else:
            messages.warning(
                request,
                _(
                    "Storage Service returned a 404 error."
                    " Has the pipeline been disabled or is"
                    " it not registered yet? Submitting"
                    " form will attempt to register the"
                    " pipeline."
                ),
            )

    return render(request, "administration/general.html", locals())
Example #43
def general(request):
    initial_data = _intial_settings_data()
    initial_data['storage_service_use_default_config'] = {
        'False': False
    }.get(initial_data.get('storage_service_use_default_config', True), True)
    general_form = GeneralSettingsForm(request.POST or None,
                                       prefix='general',
                                       initial=initial_data)
    storage_form = StorageSettingsForm(request.POST or None,
                                       prefix='storage',
                                       initial=initial_data)
    checksum_form = ChecksumSettingsForm(request.POST or None,
                                         prefix='checksum algorithm',
                                         initial=initial_data)

    forms = (general_form, storage_form, checksum_form)
    if all(map(lambda form: form.is_valid(), forms)):
        for item in forms:
            item.save()
        messages.info(request, _('Saved.'))

    dashboard_uuid = helpers.get_setting('dashboard_uuid')

    not_created_yet = False
    try:
        pipeline = storage_service.get_pipeline(dashboard_uuid)
    except Exception as err:
        if err.response is not None and err.response.status_code == 404:
            # The server has returned a 404, we're going to assume that this is
            # the Storage Service telling us that the pipeline is unknown.
            not_created_yet = True
        else:
            messages.warning(
                request,
                _('Storage Service inaccessible. Please'
                  ' contact an administrator or update'
                  ' the Storage Service URL below.'
                  '<hr />%(error)s' % {'error': err}))

    if not_created_yet:
        if storage_form.is_valid():
            try:
                setup_pipeline_in_ss(
                    storage_form.
                    cleaned_data['storage_service_use_default_config'])
            except Exception as err:
                messages.warning(
                    request,
                    _('Storage Service failed to create the'
                      ' pipeline. This can happen if'
                      ' the pipeline exists but it is'
                      ' disabled. Please contact an'
                      ' administrator.'
                      '<hr />%(error)s' % {'error': err}))
        else:
            messages.warning(
                request,
                _('Storage Service returned a 404 error.'
                  ' Has the pipeline been disabled or is'
                  ' it not registered yet? Submitting'
                  ' form will attempt to register the'
                  ' pipeline.'))

    return render(request, 'administration/general.html', locals())
Example #44
def patch_list(filt=None):
    """List patches matching all of a given set of filters.

    Filter patches by one or more of the below fields:

     * id
     * name
     * project_id
     * submitter_id
     * delegate_id
     * archived
     * state_id
     * date
     * commit_ref
     * hash
     * msgid

    It is also possible to specify the number of patches returned via
    a ``max_count`` filter.

     * max_count

    With the exception of ``max_count``, the specified fields of the
    patches are compared to the search string using a provided
    field lookup type, which can be one of:

     * iexact
     * contains
     * icontains
     * gt
     * gte
     * lt
     * in
     * startswith
     * istartswith
     * endswith
     * iendswith
     * range
     * year
     * month
     * day
     * isnull

    Please refer to the Django documentation for more information on
    these field lookup types.

    An example filter would look like so:

    {
        'name__icontains': 'Joe Bloggs',
        'max_count': 1,
    }

    Args:
        filt (dict): The filters specifying the field to compare, the
            lookup type and the value to compare against. Keys are of
            format ``[FIELD_NAME]`` or ``[FIELD_NAME]__[LOOKUP_TYPE]``.
            Example: ``name__icontains``. Values are plain strings to
            compare against.

    Returns:
        A serialized list of patches matching filters, if any. A list
        of all patches if no filter given.
    """
    if filt is None:
        filt = {}

    # We allow access to many of the fields.  But, some fields are
    # filtered by raw object so we must lookup by ID instead over
    # XML-RPC.
    ok_fields = [
        'id',
        'name',
        'project_id',
        'submitter_id',
        'delegate_id',
        'archived',
        'state_id',
        'date',
        'commit_ref',
        'hash',
        'msgid',
        'max_count',
    ]

    dfilter = {}
    max_count = 0

    for key in filt:
        parts = key.split('__')
        if parts[0] not in ok_fields:
            # Invalid field given
            return []
        if len(parts) > 1:
            if LOOKUP_TYPES.count(parts[1]) == 0:
                # Invalid lookup type given
                return []

        try:
            if parts[0] == 'project_id':
                dfilter['project'] = Project.objects.get(id=filt[key])
            elif parts[0] == 'submitter_id':
                dfilter['submitter'] = Person.objects.get(id=filt[key])
            elif parts[0] == 'delegate_id':
                dfilter['delegate'] = Person.objects.get(id=filt[key])
            elif parts[0] == 'state_id':
                dfilter['state'] = State.objects.get(id=filt[key])
            elif parts[0] == 'max_count':
                max_count = filt[key]
            else:
                dfilter[key] = filt[key]
        except (Project.DoesNotExist, Person.DoesNotExist, State.DoesNotExist):
            # Invalid Project, Person or State given
            return []

    patches = Patch.objects.filter(**dfilter)

    if max_count > 0:
        return list(map(patch_to_dict, patches[:max_count]))
    elif max_count < 0:
        min_count = patches.count() + max_count
        return list(map(patch_to_dict, patches[min_count:]))
    else:
        return list(map(patch_to_dict, patches))