def newswall_archive(path):
    """
    {% newswall_archive request.path as archive %}
    {% with base="/{{ LANGUAGE_CODE }}/blog/" %}
    <ul class="toc">
        <li class="title"><a href="{{ base }}">Archiv</a></li>
        {% for year, months in archive.items %}
            <li><a href="{{ base }}{{ year }}/">{{ year }}</a></li>
            {% for month in months %}
                <li><a href="{{ base }}{{ month|date:"Y/m/" }}">&nbsp; &nbsp; {{ month|date:"F" }}</a></li>
            {% endfor %}
        {% endfor %}
    </ul>
    {% endwith %}
    :param path: request.path
    :return: SortedDict containing the dates which contain entries.
    """
    match = YEAR_RE.search(path)
    if match:
        year = int(match.group(1))
    else:
        year = date.today().year

    archive = SortedDict()
    for month in Story.objects.active().dates('timestamp', 'month', 'DESC'):
        archive.setdefault(month.year, [])
        if month.year == year:
            archive.setdefault(month.year, []).append(month)
    return archive
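All of these examples use django.utils.datastructures.SortedDict, which was deprecated in Django 1.7 and removed in 1.9; collections.OrderedDict is the usual drop-in replacement. A minimal, self-contained sketch of the same year-to-months grouping, with plain date values standing in for the Story queryset:

from collections import OrderedDict
from datetime import date

def group_months_by_year(months, selected_year):
    """Every year becomes a key; only the selected year gets its months listed."""
    archive = OrderedDict()
    for month in months:
        archive.setdefault(month.year, [])
        if month.year == selected_year:
            archive[month.year].append(month)
    return archive

# hypothetical stand-in for Story.objects.active().dates('timestamp', 'month', 'DESC')
months = [date(2014, 3, 1), date(2014, 1, 1), date(2013, 11, 1)]
archive = group_months_by_year(months, 2014)
print({year: [d.isoformat() for d in days] for year, days in archive.items()})
# {2014: ['2014-03-01', '2014-01-01'], 2013: []}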
Example #2
def _group_events(events):
    tzinfo = get_current_timezone()
    events_grouped = SortedDict()
    for event in events:
        start_date = event.start.astimezone(tzinfo).date()
        events_grouped.setdefault(start_date, []).append(event)
    return events_grouped
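The same setdefault idiom, shown without the Event model or Django's timezone helpers; a minimal sketch where a namedtuple stands in for the model and naive datetimes replace astimezone:

from collections import OrderedDict, namedtuple
from datetime import datetime

Event = namedtuple('Event', 'name start')  # hypothetical stand-in for the real model

def group_by_start_date(events):
    grouped = OrderedDict()
    for event in events:
        grouped.setdefault(event.start.date(), []).append(event)
    return grouped

events = [Event('standup', datetime(2015, 6, 1, 9)),
          Event('retro', datetime(2015, 6, 1, 16)),
          Event('planning', datetime(2015, 6, 2, 10))]
for day, day_events in group_by_start_date(events).items():
    print(day, [e.name for e in day_events])
# 2015-06-01 ['standup', 'retro']
# 2015-06-02 ['planning']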
class FormWithSpecification(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(FormWithSpecification, self).__init__(*args, **kwargs)

        self.specification_fields = SortedDict()
        if self.instance and self.instance.pk and self.instance.specification:
            self.instance.specification.update_fields(self.instance)
            for field in self.instance.fields.select_related('field__group'):
                self.specification_fields.setdefault(field.group, []).append(
                    field.add_formfield(self))

    def save(self, *args, **kwargs):
        instance = super(FormWithSpecification, self).save(*args, **kwargs)

        if self.specification_fields:
            for field in instance.fields.all():
                field.update_value(self)

        return instance

    def specification_field_values(self):
        if not (self.instance and self.instance.pk and self.instance.specification):
            return {}

        values = {}
        for field in self.instance.fields.all():
            values[field.key] = field.get_value(self)
        return values
def newswall_archive(path):
    """
    {% newswall_archive request.path as archive %}
    {% with base="/{{ LANGUAGE_CODE }}/blog/" %}
    <ul class="toc">
        <li class="title"><a href="{{ base }}">Archiv</a></li>
        {% for year, months in archive.items %}
            <li><a href="{{ base }}{{ year }}/">{{ year }}</a></li>
            {% for month in months %}
                <li><a href="{{ base }}{{ month|date:"Y/m/" }}">&nbsp; &nbsp; {{ month|date:"F" }}</a></li>
            {% endfor %}
        {% endfor %}
    </ul>
    {% endwith %}
    :param path: request.path
    :return: SortedDict containing the dates which contain entries.
    """
    match = YEAR_RE.search(path)
    if match:
        year = int(match.group(1))
    else:
        year = date.today().year

    archive = SortedDict()
    for month in Story.objects.active().dates('timestamp', 'month', 'DESC'):
        archive.setdefault(month.year, [])
        if month.year == year:
            archive.setdefault(month.year, []).append(month)
    return archive
 def get_value(self, context, image_list):
     result = SortedDict()
     for image in image_list:
         if image.group:
             group_name = image.group
             result.setdefault(group_name, []).append(image)
     return result
def login_user(request):

    username = password = ''
    if request.POST:
        username = request.POST.get('username')
        password = request.POST.get('password')

        user = authenticate(username=username, password=password)
        if user is not None:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(REDIRECT_HOME)

    boards = Board.objects.all()
    grouping = SortedDict()
    for entry in boards:
        grouping.setdefault(entry.topic.name, [])
        grouping[entry.topic.name].append(entry)

    context = {
        'username': username,
        'boards': grouping,
    }

    return render(request, 'bulletinboard/login.html', context)
Example #7
 def get_months_active(self):
     """
     Creates a SortedDict mapping each year with log entries to the
     first-of-month dates of that year, e.g.:
     {
         ...
         2010: [datetime.date(2010, 1, 1), datetime.date(2010, 2, 1), ...],
         ...
     }
     """
     current_month = datetime.datetime.today().month
     # The current month is part of the cache key, so the cache rolls over automatically each month
     minmax_dict_key = "minmax_dict_%s_%s" % (self.id, current_month)
     minmax_dict = cache.get(minmax_dict_key, None)
     if minmax_dict is None:
         minmax_dict = self.log_set.all().aggregate(
             last_log=Max("timestamp"), first_log=Min("timestamp"))
         if not minmax_dict['first_log']:
             return SortedDict()
         # cache for 10 days
         cache.set(minmax_dict_key, minmax_dict, 864000)
     first_log = minmax_dict['first_log'].date()
     last_log = minmax_dict['last_log'].date()
     last_log = datetime.date(last_log.year, last_log.month, 1)
     current = datetime.date(first_log.year, first_log.month, 1)
     months_active = SortedDict()
     while current <= last_log:
         months_active.setdefault(current.year, []).append(current)
         if current.month == 12:
             current = datetime.date(current.year + 1, 1, 1)
         else:
             current = datetime.date(current.year, current.month + 1, 1)
     return months_active
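The month-walking loop above does not depend on the model or the cache; extracted here as a plain function for illustration (a sketch, not part of the original project, using OrderedDict instead of SortedDict):

import datetime
from collections import OrderedDict

def months_between(first, last):
    """Map each year to the first-of-month dates between two dates (inclusive)."""
    last = datetime.date(last.year, last.month, 1)
    current = datetime.date(first.year, first.month, 1)
    months = OrderedDict()
    while current <= last:
        months.setdefault(current.year, []).append(current)
        if current.month == 12:
            current = datetime.date(current.year + 1, 1, 1)
        else:
            current = datetime.date(current.year, current.month + 1, 1)
    return months

result = months_between(datetime.date(2010, 11, 3), datetime.date(2011, 2, 20))
print({year: [d.isoformat() for d in days] for year, days in result.items()})
# {2010: ['2010-11-01', '2010-12-01'], 2011: ['2011-01-01', '2011-02-01']}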
Example #8
    def get_context_data(self, form, **kwargs):
        context = super(CodingJobExportView, self).get_context_data(form=form, **kwargs)

        # add fields for schema fields
        sections = SortedDict() # section : [(id, field, subfields) ..]
        subfields = {} # fieldname -> subfields reference

        for name in form.fields:
            if form[name].is_hidden:
                continue
            prefix = name.split("_")[0]
            section = {"schemafield" : "Field options", "meta" : "Metadata options"}.get(prefix, "General options")

            if prefix == "schemafield" and not name.endswith("_included"):
                continue
            subfields[name] = []
            sections.setdefault(section, []).append((name, form[name], subfields[name]))

        # sort coding fields
        if 'Field options' in sections:
            sections["Field options"].sort()

        for name in form.fields: # add subordinate fields
            prefix = name.split("_")[0]
            if prefix == "schemafield" and not name.endswith("_included"):
                subfields[name.rsplit("_", 1)[0] + "_included"].append((name, form[name]))

        for flds in subfields.values():
            flds.sort()

        context['sections'] = sections
        return context
Example #9
File: models.py Project: MechanisM/philo
	def get_favored_results(self, error=5, threshhold=None):
		"""
		Calculates the set of most-favored results based on their weight. Evenly-weighted results will be grouped together and either added or excluded as a group.
		
		:param error: An arbitrary number; higher values will cause this method to be more reticent about adding new items to the favored results.
		:param threshhold: Will be passed directly into :meth:`get_weighted_results`
		
		"""
		if not hasattr(self, '_favored_results'):
			results = self.get_weighted_results(threshhold)
			
			grouped_results = SortedDict()
			
			for result in results:
				grouped_results.setdefault(result.weight, []).append(result)
			
			self._favored_results = []
			
			for value, subresults in grouped_results.items():
				cost = error * sum([(value - result.weight)**2 for result in self._favored_results])
				if value > cost:
					self._favored_results += subresults
				else:
					break
			if len(self._favored_results) == len(results):
				self._favored_results = []
		return self._favored_results
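The cutoff in get_favored_results is easier to see with plain numbers; a minimal sketch of the same group-then-accumulate loop using bare weights instead of result objects (it assumes weights arrive best-first, as get_weighted_results presumably guarantees):

from collections import OrderedDict

def favored_weights(weights, error=5):
    # group equal weights together, best weight first
    groups = OrderedDict()
    for w in sorted(weights, reverse=True):
        groups.setdefault(w, []).append(w)

    kept = []
    for value, group in groups.items():
        # cost grows with the spread between this group and what is already kept
        cost = error * sum((value - w) ** 2 for w in kept)
        if value > cost:
            kept += group
        else:
            break
    # if every group made the cut, nothing is singled out as "favored"
    return [] if len(kept) == len(weights) else kept

print(favored_weights([10, 10, 9.9, 3, 1]))  # -> [10, 10, 9.9]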
Example #10
    def get_favored_results(self, error=5, threshhold=None):
        """
        Calculates the set of most-favored results based on their weight. Evenly-weighted results will be grouped together and either added or excluded as a group.

        :param error: An arbitrary number; higher values will cause this method to be more reticent about adding new items to the favored results.
        :param threshhold: Will be passed directly into :meth:`get_weighted_results`

        """
        if not hasattr(self, '_favored_results'):
            results = self.get_weighted_results(threshhold)

            grouped_results = SortedDict()

            for result in results:
                grouped_results.setdefault(result.weight, []).append(result)

            self._favored_results = []

            for value, subresults in grouped_results.items():
                cost = error * sum([(value - result.weight)**2
                                    for result in self._favored_results])
                if value > cost:
                    self._favored_results += subresults
                else:
                    break
            if len(self._favored_results) == len(results):
                self._favored_results = []
        return self._favored_results
Example #11
 def get_months_active(self):
     """
     Creates a SortedDict mapping each year with log entries to the
     first-of-month dates of that year, e.g.:
     {
         ...
         2010: [datetime.date(2010, 1, 1), datetime.date(2010, 2, 1), ...],
         ...
     }
     """
     current_month = datetime.datetime.today().month
     # The current month is part of the cache key, so the cache rolls over automatically each month
     minmax_dict_key = "minmax_dict_%s_%s" % (self.id, current_month)
     minmax_dict = cache.get(minmax_dict_key, None)
     if minmax_dict is None:
         minmax_dict = self.log_set.all().aggregate(
             last_log=Max("timestamp"),
             first_log=Min("timestamp"))
         if not minmax_dict['first_log']:
             return SortedDict()
         # cache for 10 days
         cache.set(minmax_dict_key, minmax_dict, 864000)
     first_log = minmax_dict['first_log'].date()
     last_log = minmax_dict['last_log'].date()
     last_log = datetime.date(last_log.year, last_log.month, 1)
     current = datetime.date(first_log.year, first_log.month, 1)
     months_active = SortedDict()
     while current <= last_log:
         months_active.setdefault(current.year, []).append(current)
         if current.month == 12:
             current = datetime.date(current.year + 1, 1, 1)
         else:
             current = datetime.date(current.year, current.month + 1, 1)
     return months_active
    def get_context_data(self, form, **kwargs):
        context = super(CodingJobExportView, self).get_context_data(form=form, **kwargs)

        # add fields for schema fields
        sections = SortedDict() # section : [(id, field, subfields) ..]
        subfields = {} # fieldname -> subfields reference

        for name in form.fields:
            if form[name].is_hidden:
                continue
            prefix = name.split("_")[0]
            section = {"schemafield" : "Field options", "meta" : "Metadata options"}.get(prefix, "General options")

            if prefix == "schemafield" and not name.endswith("_included"):
                continue
            subfields[name] = []
            sections.setdefault(section, []).append((name, form[name], subfields[name]))

        # sort coding fields
        codingfields = sorted(sections["Field options"])
        sections["Field options"].sort()

        for name in form.fields: # add subordinate fields        
            prefix = name.split("_")[0]
            if prefix == "schemafield" and not name.endswith("_included"):
                subfields[name.rsplit("_", 1)[0] + "_included"].append((name, form[name]))

        for flds in subfields.values():
            flds.sort()
            
        context['sections'] = sections
        return context
Example #13
    def url(self):
        if self.options.get('cht', None) == 't':
            self.datasets.append(self.options.pop("_mapdata"))

        # Figure out the chart's data range
        if not self.datarange:
            maxvalue = max(
                max(d) for d in chain(self.datasets, self.hidden_datasets)
                if d)
            minvalue = min(
                min(d) for d in chain(self.datasets, self.hidden_datasets)
                if d)
            self.datarange = (minvalue, maxvalue)

        # Encode data
        if "chds" in self.options or self.options.get('cht', None) == 'gom':
            # text encoding if scaling provided, or for google-o-meter type
            data = "|".join(
                encode_text(d)
                for d in chain(self.datasets, self.hidden_datasets))
            encoded_data = "t%d:%s" % (len(self.datasets), data)
        else:
            # extended encoding otherwise
            data = extended_separator.join(
                encode_extended(d, self.datarange)
                for d in chain(self.datasets, self.hidden_datasets))
            encoded_data = "e%d:%s" % (len(self.datasets), data)

        # Update defaults
        for k in self.defaults:
            if k not in self.options:
                self.options[k] = self.defaults[k]

        # Start to calculate the URL
        url = "%s?%s&chd=%s" % (self.BASE, urlencode(
            self.options), encoded_data)

        # Calculate axis options
        if self.axes:
            axis_options = SortedDict()
            axis_sides = []
            for i, axis in enumerate(self.axes):
                axis_sides.append(axis.side)
                for opt in axis.options:
                    try:
                        axis_options.setdefault(opt, []).append(
                            axis.options[opt] % i)
                    except TypeError:
                        pass

            # Turn the option lists into strings
            axis_sides = smart_join(",", *axis_sides)
            for opt in axis_options:
                axis_options[opt] = smart_join("|", *axis_options[opt])

            url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))

        return url
Example #14
def group_by_field(queryset, field):
    result = SortedDict()
    for item in queryset:
        if callable(field):
            key = field(item)
        else:
            key = getattr(item, field)
        result.setdefault(key, []).append(item)
    return result
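A small usage sketch for the helper above, assuming group_by_field (and its SortedDict import, or collections.OrderedDict as a drop-in) is in scope; the Book namedtuple is a hypothetical stand-in for queryset rows:

from collections import namedtuple

Book = namedtuple('Book', 'title author year')
books = [Book('Dune', 'Herbert', 1965),
         Book('Hyperion', 'Simmons', 1989),
         Book('Endymion', 'Simmons', 1996)]

by_author = group_by_field(books, 'author')                     # attribute name
by_decade = group_by_field(books, lambda b: b.year // 10 * 10)  # callable key

print({author: [b.title for b in items] for author, items in by_author.items()})
# {'Herbert': ['Dune'], 'Simmons': ['Hyperion', 'Endymion']}
print(list(by_decade))  # [1960, 1980, 1990]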
Example #15
def __get_cust_and_pledges(from_date, to_date):
    pledges = Pledge.objects.filter(status='Open', loan_date__range=(from_date, to_date)).order_by('loan_date', 'id')
    
    cust_vs_pledges = SortedDict()
    dateformat = formats.DATE_INPUT_FORMATS[0]
    for p in pledges:
        cust_vs_pledges.setdefault(p.customer, list()).append({'pledge_no':p.pledge_no, 'loan_date':p.loan_date.strftime(dateformat), 'principle':p.principle})
    
    return cust_vs_pledges
Example #16
def node_settings(request, node_id, node_slug):
    """
    Node Settings
    """
    node = get_object_or_404(Node, pk=node_id)
    if node.slug != node_slug:
        return HttpResponseRedirect(
            reverse('node_settings',
                    kwargs={
                        'node_id': node_id,
                        'node_slug': node.slug
                    }))

    if not (request.user.is_admin('openode.change_node')
            or request.user.has_openode_perm('node_settings', node)):
        return render_forbidden(request)

    NodeUserInlineFormSet = inlineformset_factory(Node,
                                                  NodeUser,
                                                  form=NodeUserForm,
                                                  extra=1)
    node_users = NodeUser.objects.filter(node=node).order_by(
        'role', 'user__last_name', 'user__first_name')

    if request.method == "POST":
        form = NodeSettingsForm(instance=node, data=request.POST)
        formset = NodeUserInlineFormSet(request.POST,
                                        instance=node,
                                        queryset=node_users)
        form_is_valid = form.is_valid()
        formset_is_valid = formset.is_valid()
        if form_is_valid and formset_is_valid:
            form.save(user=request.user)
            formset.save()
            request.user.message_set.create(
                message=_('Node settings has been successfully saved.'))
            return HttpResponseRedirect(
                reverse('node_settings', args=[node.pk, node.slug]))

    else:
        form = NodeSettingsForm(instance=node)
        formset = NodeUserInlineFormSet(instance=node, queryset=node_users)

    user_emails_by_role = SortedDict()
    for node_user in node_users:
        user_emails_by_role.setdefault(node_user.get_role_display(),
                                       []).append(node_user.user.email)

    template_data = {
        'node': node,
        'form': form,
        'formset': formset,
        'user_emails_by_role': user_emails_by_role,
        'page_class': 'node-edit',
    }

    return render_into_skin('node/edit_settings.html', template_data, request)
Example #17
    def get_filters(self, obj):
        view = self.context['view']

        url = QueryURLObject(view.url)
        filter_mapping = SortedDict(
            (filter_['slug'], filter_) for filter_ in view.serialized_filters)

        filter_groups = SortedDict()

        try:
            facet_counts = [(k, getattr(obj, 'aggregations',
                                        {})[k]['doc_count'])
                            for k in filter_mapping.keys()]
        except KeyError:
            facet_counts = []

        for slug, count in facet_counts:

            filter_ = filter_mapping.get(slug, None)
            if filter_ is None:
                filter_name = slug
                group_name = None
                group_slug = None
            else:
                # Let's check if we can get the name from the gettext catalog
                filter_name = _(filter_['name'])
                group_name = _(filter_['group']['name'])
                group_slug = filter_['group']['slug']

            filter_groups.setdefault(
                (group_name, group_slug, filter_['group']['order']),
                []).append(
                    Filter(url=url,
                           page=view.current_page,
                           name=filter_name,
                           slug=slug,
                           count=count,
                           active=slug in view.selected_filters,
                           group_name=group_name,
                           group_slug=group_slug))

        # return a sorted list of filters here
        grouped_filters = []
        for group_options, filters in filter_groups.items():
            group_name, group_slug, group_order = group_options
            sorted_filters = sorted(filters, key=attrgetter('name'))
            grouped_filters.append(
                FilterGroup(name=group_name,
                            slug=group_slug,
                            order=group_order,
                            options=sorted_filters))
        return FacetedFilterSerializer(sorted(grouped_filters,
                                              key=attrgetter('order'),
                                              reverse=True),
                                       many=True).data
Example #18
def codingjob_export_options(request, project):
    if request.GET.get("use_session"):
        jobs = json.loads(request.session['export_job_ids'])
    else:
        jobs = request.GET.getlist("codingjobs")
    level = int(request.GET["export_level"])
    form = GetCodingJobResults.options_form(
        request.POST or None, project=project, codingjobs=jobs, export_level=level,
        initial=dict(codingjobs=jobs, export_level=level)
    )

    
    
    sections = SortedDict() # section : [(id, field, subfields) ..]
    subfields = {} # fieldname -> subfields reference

    for name in form.fields:
        if form[name].is_hidden:
            continue
        prefix = name.split("_")[0]
        section = {"schemafield" : "Field options", "meta" : "Metadata options"}.get(prefix, "General options")

        if prefix == "schemafield" and not name.endswith("_included"):
            continue
        subfields[name] = []
        sections.setdefault(section, []).append((name, form[name], subfields[name]))
        form[name].subfields = []

    # sort coding fields
    codingfields = sorted(sections["Field options"])
    sections["Field options"].sort()
    
    for name in form.fields: # add subordinate fields        
        prefix = name.split("_")[0]
        if prefix == "schemafield" and not name.endswith("_included"):
            subfields[name.rsplit("_", 1)[0] + "_included"].append((name, form[name]))

    for flds in subfields.values():
        flds.sort()
            
    if form.is_valid():
        results = GetCodingJobResults(form).run()

        eformat = {f.label : f for f in EXPORT_FORMATS}[form.cleaned_data["export_format"]]
        
        if eformat.mimetype is not None:
            if len(jobs) > 3:
                jobs = jobs[:3] + ["etc"]
            filename = "Codingjobs {j} {now}.{ext}".format(j=",".join(str(j) for j in jobs), now=datetime.datetime.now(), ext=eformat.label)
            response = HttpResponse(content_type=eformat.mimetype, status=200)
            response['Content-Disposition'] = 'attachment; filename="{filename}"'.format(**locals())
            response.write(results)
            return response

    return render(request, 'navigator/project/export_options.html', locals())
def picture_detail(request, picture):
    picture = get_object_or_404(Picture, slug=picture)

    categories = SortedDict()
    for tag in picture.tags.iterator():
        categories.setdefault(tag.category, []).append(tag)

    picture_themes = []

    return render_to_response("catalogue/picture_detail.html", locals(),
                              context_instance=RequestContext(request))
Example #20
    def get_filters(self, obj):
        view = self.context['view']

        url = QueryURLObject(view.url)
        filter_mapping = SortedDict((filter_['slug'], filter_)
                                    for filter_ in view.serialized_filters)

        filter_groups = SortedDict()

        try:
            facet_counts = [
                (k, getattr(obj, 'aggregations', {})[k]['doc_count'])
                for k in filter_mapping.keys()]
        except KeyError:
            facet_counts = []

        for slug, count in facet_counts:

            filter_ = filter_mapping.get(slug, None)
            if filter_ is None:
                filter_name = slug
                group_name = None
                group_slug = None
            else:
                # Let's check if we can get the name from the gettext catalog
                filter_name = _(filter_['name'])
                group_name = _(filter_['group']['name'])
                group_slug = filter_['group']['slug']

            filter_groups.setdefault((
                group_name,
                group_slug,
                filter_['group']['order']
            ), []).append(
                Filter(url=url, page=view.current_page, name=filter_name,
                       slug=slug, count=count, active=slug in
                       view.selected_filters, group_name=group_name,
                       group_slug=group_slug)
            )

        # return a sorted list of filters here
        grouped_filters = []
        for group_options, filters in filter_groups.items():
            group_name, group_slug, group_order = group_options
            sorted_filters = sorted(filters, key=attrgetter('name'))
            grouped_filters.append(FilterGroup(name=group_name,
                                               slug=group_slug,
                                               order=group_order,
                                               options=sorted_filters))
        return FacetedFilterSerializer(
            sorted(grouped_filters, key=attrgetter('order'), reverse=True),
            many=True
        ).data
Example #21
def picture_detail(request, picture):
    picture = get_object_or_404(Picture, slug=picture)

    categories = SortedDict()
    for tag in picture.tags.iterator():
        categories.setdefault(tag.category, []).append(tag)

    picture_themes = []

    return render_to_response("catalogue/picture_detail.html",
                              locals(),
                              context_instance=RequestContext(request))
def picture_list(request, filter=None, template_name='catalogue/picture_list.html'):
    """ generates a listing of all books, optionally filtered with a test function """

    pictures_by_author, orphans = Picture.picture_list()
    books_nav = SortedDict()
    for tag in pictures_by_author:
        if pictures_by_author[tag]:
            books_nav.setdefault(tag.sort_key[0], []).append(tag)

            #    import pdb; pdb.set_trace()
    return render_to_response(template_name, locals(),
        context_instance=RequestContext(request))
Example #23
def book_list(request, filter=None, template_name='catalogue/book_list.html',
        context=None):
    """ generates a listing of all books, optionally filtered with a test function """

    books_by_author, orphans, books_by_parent = models.Book.book_list(filter)
    books_nav = SortedDict()
    for tag in books_by_author:
        if books_by_author[tag]:
            books_nav.setdefault(tag.sort_key[0], []).append(tag)

    return render_to_response(template_name, locals(),
        context_instance=RequestContext(request))
Example #24
File: base.py Project: emulbreh/ecs
 def docs(self):
     d = SortedDict()
     for name in self.get_field_names():
         prefix, key = self.split_prefix(name)
         info = self.get_field_docs(name)
         if prefix:
             d.setdefault(prefix, {})
             d[prefix][key] = info
         else:
             d[name] = info
     d.keyOrder = list(sorted(d.keys()))
     return d
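keyOrder is an attribute specific to Django's SortedDict; with collections.OrderedDict the equivalent re-sort is done by rebuilding the mapping (a sketch, not part of the ecs project):

from collections import OrderedDict

def with_sorted_keys(d):
    """Return a copy of a mapping with its keys in sorted order."""
    return OrderedDict((key, d[key]) for key in sorted(d))

docs = OrderedDict([('zeta', 'last field'), ('alpha', 'first field')])
print(list(with_sorted_keys(docs)))  # ['alpha', 'zeta']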
def home(request):

    #if user does not have a user profile, create one
    profile = UserProfile.objects.filter(user=request.user)
    if not profile:
        profile = UserProfile(user=request.user, avatar=DEFAULT_AVATAR, about_me=" ",
                              birthdate=datetime.date.today(), hometown=" ", present_location=" ")
        if request.user.is_superuser or request.user.is_staff:
            profile.user_type = 3
        profile.save()

    boards = Board.objects.all()
    topic_form = TopicForm()
    board_form = BoardForm()
    profile = UserProfile.objects.get(user=request.user)

    if request.method == 'POST':
        #user clicks the 'Add Topic'
        if 'addtopic' in request.POST:
            topic_form = TopicForm(request.POST)
            if topic_form.is_valid():
                topic_form.save()
                return HttpResponseRedirect(REDIRECT_HOME)
        #user clicks the 'Add Board' button
        elif 'addboard' in request.POST:
            board_form = BoardForm(request.POST, request.FILES)
            if board_form.is_valid():
                board = board_form.save(commit=False)
                if Board.objects.all():
                    last_board = Board.objects.aggregate(last_rank=Max('rank'))
                    board.rank = last_board['last_rank']+1
                else:
                    board.rank = 1
                board.save()

                return HttpResponseRedirect(REDIRECT_HOME)

    #group boards according to their topics
    grouping = SortedDict()
    for entry in boards:
        grouping.setdefault(entry.topic.name, [])
        grouping[entry.topic.name].append(entry)

    context = {
        'boards': grouping,
        'profile': profile,
        'reorder': True,
        'topic_form': topic_form,
        'board_form': board_form,
    }
    return render(request, 'bulletinboard/home.html', context)
Example #26
    def url(self):
        if self.options.get('cht', None) == 't':
            self.datasets.append(self.options.pop("_mapdata"))

        # Figure out the chart's data range
        if not self.datarange:
            if self.datasets == [[]]:
                maxvalue = 0
                minvalue = 0
            else:
                maxvalue = max(max(d) for d in self.datasets if d)
                minvalue = min(min(d) for d in self.datasets if d)
            self.datarange = (minvalue, maxvalue)
        
        # Encode data
        if "chds" in self.options or self.options.get('cht', None) == 'gom': 
            # text encoding if scaling provided, or for google-o-meter type
            data = "|".join(encode_text(d) for d in self.datasets)
            encoded_data = "t:%s" % data
        else: 
            # extended encoding otherwise
            data = extended_separator.join(encode_extended(d, self.datarange) for d in self.datasets)
            encoded_data = "e:%s" % data
        
        # Update defaults
        for k in self.defaults:
            if k not in self.options:
                self.options[k] = self.defaults[k]
        
        # Start to calculate the URL
        url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options), encoded_data)
        
        # Calculate axis options
        if self.axes:
            axis_options = SortedDict()
            axis_sides = []
            for i, axis in enumerate(self.axes):
                axis_sides.append(axis.side)
                for opt in axis.options:
                    axis_options.setdefault(opt, []).append(axis.options[opt] % i)
        
            # Turn the option lists into strings
            axis_sides = smart_join(",", *axis_sides)
            for opt in axis_options:
                axis_options[opt] = smart_join("|", *axis_options[opt])
            
            url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
            
        return url
Example #27
def picture_list(request,
                 filter=None,
                 template_name='catalogue/picture_list.html'):
    """ generates a listing of all books, optionally filtered with a test function """

    pictures_by_author, orphans = Picture.picture_list()
    books_nav = SortedDict()
    for tag in pictures_by_author:
        if pictures_by_author[tag]:
            books_nav.setdefault(tag.sort_key[0], []).append(tag)

            #    import pdb; pdb.set_trace()
    return render_to_response(template_name,
                              locals(),
                              context_instance=RequestContext(request))
Example #28
def get_message_list(project_id, lang_id, src_filters={}, target_filters={}):
    lang_id = int(lang_id)
    project_language = Project.objects.get(id=project_id).lang.id

    new_query = SetMessage.objects.all()

    if not new_query.exists():
        return []

    new_query = new_query.filter(_normalize_filters(src_filters))

    target_predicate = _normalize_filters(target_filters)
    if target_predicate:
        new_query = new_query.filter(Q(lang=project_language) | Q(target_predicate))

    res = SortedDict()

    new_query = _update_message_query(new_query, project_id, lang_id)

    for data in new_query.order_by('msgid'):
        msg_info = res.setdefault(data.msgid, {'msg_id': data.msgid})
        if data.lang_id == lang_id:
            msg_info.update(
                {
                    'msg_target': data.msgstr,
                    'target_id': data.id,
                    'is_translated': data.is_translated
                }
            )
        if data.lang_id == project_language:
            msg_info.update({'msg_source': data.msgstr, 'id': data.id})
    messages = [i for i in res.values() if 'msg_target' in i]
    return messages
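This example relies on setdefault returning the stored value, so rows for the same msgid are merged across languages; a stripped-down sketch of that merge with plain tuples instead of SetMessage objects (hypothetical data, no ORM):

from collections import OrderedDict

rows = [  # (msgid, lang, text) -- hypothetical rows standing in for SetMessage objects
    ('greeting', 'en', 'Hello'),
    ('greeting', 'de', 'Hallo'),
    ('farewell', 'en', 'Bye'),
]
SOURCE_LANG, TARGET_LANG = 'en', 'de'

merged = OrderedDict()
for msgid, lang, text in rows:
    info = merged.setdefault(msgid, {'msg_id': msgid})  # same dict on every pass
    if lang == SOURCE_LANG:
        info['msg_source'] = text
    if lang == TARGET_LANG:
        info['msg_target'] = text

print([info for info in merged.values() if 'msg_target' in info])
# only 'greeting' has a translation:
# [{'msg_id': 'greeting', 'msg_source': 'Hello', 'msg_target': 'Hallo'}]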
Example #29
File: queries.py Project: Faldrian/kuma
    def faceted_filters(self):
        url = QueryURLObject(self.url)
        filter_mapping = SortedDict((filter_['slug'], filter_)
                                    for filter_ in self.serialized_filters)

        filter_groups = SortedDict()

        for slug, facet in self.facet_counts().items():
            # if not isinstance(facet, dict):
            #     # let's just blankly ignore any non-filter or non-query filters
            #     continue

            filter_ = filter_mapping.get(slug, None)
            if filter_ is None:
                filter_name = slug
                group_name = None
                group_slug = None
            else:
                # Let's check if we can get the name from the gettext catalog
                filter_name = _(filter_['name'])
                group_name = _(filter_['group']['name'])
                group_slug = filter_['group']['slug']

            filter_groups.setdefault((
                group_name,
                group_slug,
                filter_['group']['order']
            ), []).append(
                Filter(url=url,
                       page=self.current_page,
                       name=filter_name,
                       slug=slug,
                       count=facet['count'],
                       active=slug in self.selected_filters,
                       group_name=group_name,
                       group_slug=group_slug))

        # return a sorted list of filters here
        grouped_filters = []
        for group_options, filters in filter_groups.items():
            group_name, group_slug, group_order = group_options
            sorted_filters = sorted(filters, key=attrgetter('name'))
            grouped_filters.append(FilterGroup(name=group_name,
                                               slug=group_slug,
                                               order=group_order,
                                               options=sorted_filters))
        return sorted(grouped_filters, key=attrgetter('order'), reverse=True)
Example #30
def get_group_servers_dict(request, group_id=0, get_server_list=False):
    '''Fetch the list of servers grouped by server group; used with widget/group_server.html
    '''

    group_servers_dict = SortedDict()

    exclude_list = set()

    if group_id:
        groups = request.admin.get_resource('server_group').filter(
            id=group_id)  # .prefetch_related('server')
    else:
        groups = request.admin.get_resource(
            'server_group').all()  # .prefetch_related('server')

    other_servers = request.admin.get_resource('server').prefetch_related(
        'group_set')

    not_group_set = set()
    has_add_group_server_set = set()
    for s in other_servers:
        g_s_list = s.group_set.all()

        if g_s_list:
            for g in g_s_list:
                if g in groups:
                    group_servers_dict.setdefault(g, set())
                    group_servers_dict[g].add(s)
                    has_add_group_server_set.add(s)
                else:
                    not_group_set.add(s)
        else:
            not_group_set.add(s)
        exclude_list.add(s)

    if not_group_set:
        not_group = Group()
        not_group.id = 0
        not_group.name = '其他'  # '其他' means "Other" (ungrouped servers)
        group_servers_dict[
            not_group] = not_group_set - has_add_group_server_set

    if get_server_list:
        server_list = exclude_list
        return group_servers_dict, server_list
    else:
        return group_servers_dict
Example #31
File: queries.py Project: tannishk/kuma
    def faceted_filters(self):
        url = QueryURLObject(self.url)
        filter_mapping = SortedDict(
            (filter_['slug'], filter_) for filter_ in self.serialized_filters)

        filter_groups = SortedDict()

        for slug, facet in self.facet_counts().items():
            # if not isinstance(facet, dict):
            #     # let's just blankly ignore any non-filter or non-query filters
            #     continue

            filter_ = filter_mapping.get(slug, None)
            if filter_ is None:
                filter_name = slug
                group_name = None
                group_slug = None
            else:
                # Let's check if we can get the name from the gettext catalog
                filter_name = _(filter_['name'])
                group_name = _(filter_['group']['name'])
                group_slug = filter_['group']['slug']

            filter_groups.setdefault(
                (group_name, group_slug, filter_['group']['order']),
                []).append(
                    Filter(url=url,
                           page=self.current_page,
                           name=filter_name,
                           slug=slug,
                           count=facet['count'],
                           active=slug in self.selected_filters,
                           group_name=group_name,
                           group_slug=group_slug))

        # return a sorted list of filters here
        grouped_filters = []
        for group_options, filters in filter_groups.items():
            group_name, group_slug, group_order = group_options
            sorted_filters = sorted(filters, key=attrgetter('name'))
            grouped_filters.append(
                FilterGroup(name=group_name,
                            slug=group_slug,
                            order=group_order,
                            options=sorted_filters))
        return sorted(grouped_filters, key=attrgetter('order'), reverse=True)
Example #32
File: node.py Project: JvGinkel/openode
def node_settings(request, node_id, node_slug):
    """
    Node Settings
    """
    node = get_object_or_404(Node, pk=node_id)
    if node.slug != node_slug:
        return HttpResponseRedirect(reverse('node_settings', kwargs={
            'node_id': node_id,
            'node_slug': node.slug
        }))

    if not (request.user.is_admin('openode.change_node') or request.user.has_openode_perm('node_settings', node)):
        return render_forbidden(request)

    NodeUserInlineFormSet = inlineformset_factory(Node, NodeUser, form=NodeUserForm, extra=1)
    node_users = NodeUser.objects.filter(node=node).order_by('role', 'user__last_name', 'user__first_name')

    if request.method == "POST":
        form = NodeSettingsForm(instance=node, data=request.POST)
        formset = NodeUserInlineFormSet(request.POST, instance=node, queryset=node_users)
        form_is_valid = form.is_valid()
        formset_is_valid = formset.is_valid()
        if form_is_valid and formset_is_valid:
            form.save(user=request.user)
            formset.save()
            request.user.message_set.create(message=_('Node settings has been successfully saved.'))
            return HttpResponseRedirect(reverse('node_settings', args=[node.pk, node.slug]))

    else:
        form = NodeSettingsForm(instance=node)
        formset = NodeUserInlineFormSet(instance=node, queryset=node_users)

    user_emails_by_role = SortedDict()
    for node_user in node_users:
        user_emails_by_role.setdefault(node_user.get_role_display(), []).append(node_user.user.email)

    template_data = {
        'node': node,
        'form': form,
        'formset': formset,
        'user_emails_by_role': user_emails_by_role,
        'page_class': 'node-edit',
    }

    return render_into_skin('node/edit_settings.html', template_data, request)
Example #33
def __get_cust_and_pledges(from_date, to_date):
    pledges = Pledge.objects.filter(
        status='Open',
        loan_date__range=(from_date, to_date)).order_by('loan_date', 'id')

    cust_vs_pledges = SortedDict()
    dateformat = formats.DATE_INPUT_FORMATS[0]
    for p in pledges:
        cust_vs_pledges.setdefault(p.customer, list()).append({
            'pledge_no': p.pledge_no,
            'loan_date': p.loan_date.strftime(dateformat),
            'principle': p.principle,
        })

    return cust_vs_pledges
Example #34
def get_method_info(method):

    info = {}

    help_text = pydoc.getdoc(method)

    args = SortedDict()
    desc_re = re.compile(r':(?P<desc>param|parameter|arg|argument|key|keyword)\s+(?P<name>.+):\s+(?P<value>.+)')
    type_re = re.compile(r':(?P<type>type)\s+(?P<name>.+):\s+(?P<value>.+)')
    for expression in (desc_re, type_re):
        for match in expression.finditer(help_text):
            data = match.groupdict()
            if 'desc' in data:
                key = 'desc'
            else:
                key = 'type'
            name = data['name']
            value = data['value']
            args.setdefault(name, {})
            args[name][key] = value
        help_text = expression.sub('', help_text)
    if args:
        info['args'] = args

    desc_re = re.compile(r':(?P<desc>returns?):\s+(?P<value>.+)')
    type_re = re.compile(r':(?P<type>rtype):\s+(?P<value>.+)')
    for expression in (desc_re, type_re):
        match = expression.search(help_text)
        if match:
            data = match.groupdict()
            if 'desc' in data:
                key = 'desc'
            else:
                key = 'type'
            value = data['value']
            info.setdefault('returns', {})
            info['returns'][key] = value
        help_text = expression.sub('', help_text)

    info['help_text'] = help_text.strip()
    info['signature'] = get_signature(method)

    return info
    def get_default_fields(self):
        """
        Returns all the form fields that should be serialized.
        """
        ret = SortedDict()

        # Get a serializer field for each form field.
        for key, form_field in self.get_form().fields.iteritems():
            # TODO: Get multiple serializer fields for ``MultiValueField`` and
            #       ``MultiValueWidget`` form fields and widgets.
            field = self.get_field(form_field)
            if field:
                ret[key] = field

        # Add fields from the super class.
        fields = super(FormSerializerMixin, self).get_default_fields()
        for key, field in fields.iteritems():
            ret.setdefault(key, field)

        return ret
    def DashboardDetail(self, **unused_args):
        """Returns a dict of time to measurement type to count."""
        start_time = self.request.get('start_time')
        end_time = self.request.get('end_time')
        limit = self.request.get('limit')

        entries = model.ValidationEntry.all()

        if start_time:
            start_time = util.MicrosecondsSinceEpochToTime(int(start_time))
        if end_time:
            end_time = util.MicrosecondsSinceEpochToTime(int(end_time))
        if limit:
            limit = int(limit)
        else:
            limit = 1000

        # TODO(drc): Incorporate date limits.
        time_to_type_to_cnt = SortedDict()

        # group by time
        for ent in entries.fetch(limit):
            ms_time = ent.summary.timestamp_start
            meas_type = ent.summary.measurement_type
            time_to_type_to_cnt.setdefault(ms_time, dict()).setdefault(
                meas_type, {'details': []})
            time_to_type_to_cnt[ms_time][meas_type][
                'count'] = ent.summary.error_count
            # links to ids for eventually showing more detail
            time_to_type_to_cnt[ms_time][meas_type]['details'].append([
                ent.measurement.key().id(),
                ent.measurement.device_properties.device_info.id
            ])

        # now sort by time
        sorted_results = SortedDict()
        for k in sorted(time_to_type_to_cnt.iterkeys()):
            sorted_results[k] = time_to_type_to_cnt[k]

        return sorted_results
Example #37
def book_list(request, filter=None, template_name='catalogue/book_list.html'):
    """ generates a listing of all books, optionally filtered with a test function """

    form = forms.SearchForm()

    books_by_parent = {}
    books = models.Book.objects.all().order_by('parent_number', 'title').only('title', 'parent', 'slug')
    if filter:
        books = books.filter(filter).distinct()
        book_ids = set((book.pk for book in books))
        for book in books:
            parent = book.parent_id
            if parent not in book_ids:
                parent = None
            books_by_parent.setdefault(parent, []).append(book)
    else:
        for book in books:
            books_by_parent.setdefault(book.parent_id, []).append(book)

    orphans = []
    books_by_author = SortedDict()
    books_nav = SortedDict()
    for tag in models.Tag.objects.filter(category='author'):
        books_by_author[tag] = []

    for book in books_by_parent.get(None,()):
        authors = list(book.tags.filter(category='author'))
        if authors:
            for author in authors:
                books_by_author[author].append(book)
        else:
            orphans.append(book)

    for tag in books_by_author:
        if books_by_author[tag]:
            books_nav.setdefault(tag.sort_key[0], []).append(tag)

    return render_to_response(template_name, locals(),
        context_instance=RequestContext(request))
Example #38
  def DashboardDetail(self, **unused_args):
    """Returns a dict of time to measurement type to count."""
    start_time = self.request.get('start_time')
    end_time = self.request.get('end_time')
    limit = self.request.get('limit')

    entries = model.ValidationEntry.all()

    if start_time:
      start_time = util.MicrosecondsSinceEpochToTime(int(start_time))
    if end_time:
      end_time = util.MicrosecondsSinceEpochToTime(int(end_time))
    if limit:
      limit = int(limit)
    else:
      limit = 1000

    # TODO(drc): Incorporate date limits.
    time_to_type_to_cnt = SortedDict()

    # group by time
    for ent in entries.fetch(limit):
      ms_time = ent.summary.timestamp_start
      meas_type = ent.summary.measurement_type
      time_to_type_to_cnt.setdefault(ms_time, dict()).setdefault(
          meas_type, {'details': []})
      time_to_type_to_cnt[ms_time][meas_type]['count'] = ent.summary.error_count
      # links to ids for eventually showing more detail
      time_to_type_to_cnt[ms_time][meas_type]['details'].append(
        [ent.measurement.key().id(),
         ent.measurement.device_properties.device_info.id])

    # now sort by time
    sorted_results = SortedDict()
    for k in sorted(time_to_type_to_cnt.iterkeys()):
      sorted_results[k] = time_to_type_to_cnt[k]

    return sorted_results
Example #39
File: views.py Project: gloggi/aure4
def al_bereich(request, abteilung):
    abteilung = get_object_or_404(Abteilung, slug=abteilung)

    if abteilung not in request.user.abteilungen.all():
        return HttpResponseForbidden('Du bist nicht AL dieser Abteilung')

    anmeldungen = SortedDict()
    for anmeldung in Anmeldung.objects.filter(abteilung=abteilung):
        abteilung_tns = anmeldungen.setdefault(anmeldung.kurs, [])
        abteilung_tns.append(anmeldung)

    return render(request, 'kurse/albereich.html', {
        'abteilung': abteilung,
        'anmeldungen': anmeldungen
    })
Example #40
def book_list(request, filter=None, get_filter=None,
        template_name='catalogue/book_list.html',
        nav_template_name='catalogue/snippets/book_list_nav.html',
        list_template_name='catalogue/snippets/book_list.html',
        cache_key='catalogue.book_list',
        context=None,
        ):
    """ generates a listing of all books, optionally filtered with a test function """
    cached = permanent_cache.get(cache_key)
    if cached is not None:
        rendered_nav, rendered_book_list = cached
    else:
        if get_filter:
            filter = get_filter()
        books_by_author, orphans, books_by_parent = models.Book.book_list(filter)
        books_nav = SortedDict()
        for tag in books_by_author:
            if books_by_author[tag]:
                books_nav.setdefault(tag.sort_key[0], []).append(tag)
        rendered_nav = render_to_string(nav_template_name, locals())
        rendered_book_list = render_to_string(list_template_name, locals())
        permanent_cache.set(cache_key, (rendered_nav, rendered_book_list))
    return render_to_response(template_name, locals(),
        context_instance=RequestContext(request))
Example #41
File: views.py Project: gloggi/aure4
def al_bereich(request, abteilung):
    abteilung = get_object_or_404(Abteilung, slug=abteilung)

    if abteilung not in request.user.abteilungen.all():
        return HttpResponseForbidden('Du bist nicht AL dieser Abteilung')

    anmeldungen = SortedDict()
    for anmeldung in Anmeldung.objects.filter(abteilung=abteilung):
        abteilung_tns = anmeldungen.setdefault(anmeldung.kurs, [])
        abteilung_tns.append(anmeldung)

    return render(request, 'kurse/albereich.html', {
        'abteilung': abteilung,
        'anmeldungen': anmeldungen
    })
Example #42
class CrossGridReport(object):
    def __init__(self,
                 title,
                 row_reduce,
                 row_map,
                 col_reduce,
                 agg_function,
                 header_map,
                 row_sort=None):
        """
        row_reduce:   reference to row reduce function
                      it take element and should return row key

        row_map:      function to map row basic object;
                      take 1 argument: object;
                      value, returned by this function will be used when
                      printing row

        col_reduce:   same as row_reduce but for columns

        agg_function: funciton will call when apped value to the column
                      for extract data from object;
                      take 2 arguments: object, current value

        header_map: function will call when adding new key into column header
                    take 1 argument: object

        """
        self.title = title
        self.row_reduce = row_reduce
        self.row_map = row_map
        self.col_reduce = col_reduce
        self.agg_function = agg_function
        self.header_map = header_map
        self.row_sort = row_sort

        self.row = SortedDict()
        self.columns = SortedDict()

    def append(self, obj):
        row_key = self.row_reduce(obj)
        col_key = self.col_reduce(obj)

        if col_key not in self.columns:
            self.columns[col_key] = self.header_map(obj)

        row_obj = self.row_map(obj)
        row = self.append_row(row_obj, row_key)
        row.append(col_key, obj)

    def append_row(self, row_obj, row_key):
        return self.row.setdefault(row_key, ReportRow(self, row_obj, row_key))

    def append_column(self, col_obj, col_key):
        self.columns.setdefault(col_key, col_obj)

    def iter_columns(self):
        return self.columns.itervalues()

    def iter_columns_key(self):
        return self.columns.iterkeys()

    def iter_rows(self):
        return self.row.itervalues()
Example #43
    def compress(self, log=None, **options):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).
        """
        extensions = options.get('extensions')
        extensions = self.handle_extensions(extensions or ['html'])
        verbosity = int(options.get("verbosity", 0))
        if not log:
            log = StringIO()
        if not settings.TEMPLATE_LOADERS:
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings.")
        paths = set()
        for loader in self.get_loaders():
            try:
                module = import_module(loader.__module__)
                get_template_sources = getattr(module, 'get_template_sources',
                                               None)
                if get_template_sources is None:
                    get_template_sources = loader.get_template_sources
                paths.update(list(get_template_sources('')))
            except (ImportError, AttributeError):
                # Yeah, this didn't work out so well, let's move on
                pass
        if not paths:
            raise OfflineGenerationError("No template paths found. None of "
                                         "the configured template loaders "
                                         "provided template paths. See "
                                         "http://django.me/template-loaders "
                                         "for more information on template "
                                         "loaders.")
        if verbosity > 1:
            log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
        templates = set()
        for path in paths:
            for root, dirs, files in walk(path,
                                          followlinks=options.get(
                                              'followlinks', False)):
                templates.update(
                    os.path.join(root, name) for name in files
                    if not name.startswith('.') and any(
                        fnmatch(name, "*%s" % glob) for glob in extensions))
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity > 1:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        compressor_nodes = SortedDict()
        for template_name in templates:
            try:
                template_file = open(template_name)
                try:
                    template = Template(template_file.read().decode(
                        settings.FILE_CHARSET))
                finally:
                    template_file.close()
            except IOError:  # unreadable file -> ignore
                if verbosity > 0:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError:  # broken template -> ignore
                if verbosity > 0:
                    log.write("Invalid template at: %s\n" % template_name)
                continue
            except UnicodeDecodeError:
                if verbosity > 0:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
            nodes = list(self.walk_nodes(template))
            if nodes:
                template.template_name = template_name
                compressor_nodes.setdefault(template, []).extend(nodes)

        if not compressor_nodes:
            raise OfflineGenerationError(
                "No 'compress' template tags found in templates.")

        if verbosity > 0:
            log.write("Found 'compress' tags in:\n\t" +
                      "\n\t".join((t.template_name
                                   for t in compressor_nodes.keys())) + "\n")

        log.write("Compressing... ")
        count = 0
        results = []
        offline_manifest = {}
        for template, nodes in compressor_nodes.iteritems():
            context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
            extra_context = {}
            firstnode = template.nodelist[0]
            if isinstance(firstnode, ExtendsNode):
                # If this template has a ExtendsNode, we apply our patch to
                # generate the necessary context, and then use it for all the
                # nodes in it, just in case (we don't know which nodes were
                # in a block)
                firstnode._old_get_parent = firstnode.get_parent
                firstnode.get_parent = MethodType(patched_get_parent,
                                                  firstnode)
                extra_context = firstnode.render(context)
                context.render_context = extra_context.render_context
            for node in nodes:
                context.push()
                if extra_context and node._block_name:
                    context['block'] = context.render_context[
                        BLOCK_CONTEXT_KEY].pop(node._block_name)
                    if context['block']:
                        context['block'].context = context
                key = get_offline_hexdigest(node.nodelist)
                try:
                    result = node.render(context, forced=True)
                except Exception, e:
                    raise CommandError("An error occured during rendering: "
                                       "%s" % e)
                offline_manifest[key] = result
                context.pop()
                results.append(result)
                count += 1
Example #44
    def compress(self, log=None, **options):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).
        """
        extensions = options.get('extensions')
        extensions = self.handle_extensions(extensions or ['html'])
        verbosity = int(options.get("verbosity", 0))
        if not log:
            log = StringIO()
        if not settings.TEMPLATE_LOADERS:
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings.")
        paths = set()
        for loader in self.get_loaders():
            try:
                module = import_module(loader.__module__)
                get_template_sources = getattr(module,
                    'get_template_sources', None)
                if get_template_sources is None:
                    get_template_sources = loader.get_template_sources
                paths.update(list(get_template_sources('')))
            except (ImportError, AttributeError):
                # Yeah, this didn't work out so well, let's move on
                pass
        if not paths:
            raise OfflineGenerationError("No template paths found. None of "
                                         "the configured template loaders "
                                         "provided template paths. See "
                                         "http://django.me/template-loaders "
                                         "for more information on template "
                                         "loaders.")
        if verbosity > 1:
            log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
        templates = set()
        for path in paths:
            for root, dirs, files in walk(path,
                    followlinks=options.get('followlinks', False)):
                templates.update(os.path.join(root, name)
                    for name in files if not name.startswith('.') and
                        any(fnmatch(name, "*%s" % glob) for glob in extensions))
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity > 1:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        compressor_nodes = SortedDict()
        for template_name in templates:
            try:
                template_file = open(template_name)
                try:
                    template = Template(template_file.read().decode(
                                        settings.FILE_CHARSET))
                finally:
                    template_file.close()
            except IOError:  # unreadable file -> ignore
                if verbosity > 0:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError:  # broken template -> ignore
                if verbosity > 0:
                    log.write("Invalid template at: %s\n" % template_name)
                continue
            except TemplateDoesNotExist:  # non existent template -> ignore
                if verbosity > 0:
                    log.write("Non-existent template at: %s\n" % template_name)
                continue
            except UnicodeDecodeError:
                if verbosity > 0:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
                continue  # skip undecodable templates instead of reusing a stale `template`
            nodes = list(self.walk_nodes(template))
            if nodes:
                template.template_name = template_name
                compressor_nodes.setdefault(template, []).extend(nodes)

        if not compressor_nodes:
            raise OfflineGenerationError(
                "No 'compress' template tags found in templates.")

        if verbosity > 0:
            log.write("Found 'compress' tags in:\n\t" +
                      "\n\t".join((t.template_name for t in compressor_nodes.keys())) + "\n")

        log.write("Compressing... ")
        count = 0
        results = []
        offline_manifest = {}
        for template, nodes in compressor_nodes.iteritems():
            context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
            extra_context = {}
            firstnode = template.nodelist[0]
            if isinstance(firstnode, ExtendsNode):
                # If this template has an ExtendsNode, we apply our patch to
                # generate the necessary context, and then use it for all the
                # nodes in it, just in case (we don't know which nodes were
                # in a block)
                firstnode._old_get_parent = firstnode.get_parent
                firstnode.get_parent = MethodType(patched_get_parent, firstnode)
                try:
                    extra_context = firstnode.render(context)
                    context.render_context = extra_context.render_context
                except (IOError, TemplateSyntaxError, TemplateDoesNotExist):
                    # That first node we are trying to render might cause more errors
                    # that we didn't catch when simply creating a Template instance
                    # above, so we need to catch that (and ignore it, just like above)
                    # as well.
                    if verbosity > 0:
                        log.write("Caught error when rendering extend node from template %s\n" % template.template_name)
                    continue
            for node in nodes:
                context.push()
                if extra_context and node._block_name:
                    context['block'] = context.render_context[BLOCK_CONTEXT_KEY].pop(node._block_name)
                    if context['block']:
                        context['block'].context = context
                key = get_offline_hexdigest(node.nodelist.render(context))
                try:
                    result = node.render(context, forced=True)
                except Exception, e:
                    raise CommandError("An error occured during rendering %s: "
                                       "%s" % (template.template_name, e))
                offline_manifest[key] = result
                context.pop()
                results.append(result)
                count += 1
Example #45
class CachedFilesMixin(object):
    patterns = (("*.css", (
        r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
        r"""(@import\s*["']\s*(.*?)["'])""",
    )), )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache('staticfiles')
        except (InvalidCacheBackendError, ValueError):
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        for extension, patterns in self.patterns:
            for pattern in patterns:
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append(compiled)

    def hashed_name(self, name, content=None):
        if content is None:
            if not self.exists(name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (name, self))
            try:
                content = self.open(name)
            except IOError:
                # Handle directory paths
                return name
        path, filename = os.path.split(name)
        root, ext = os.path.splitext(filename)
        # Get the MD5 hash of the file
        md5 = md5_constructor()
        for chunk in content.chunks():
            md5.update(chunk)
        md5sum = md5.hexdigest()[:12]
        return os.path.join(path, u"%s.%s%s" % (root, md5sum, ext))

    def cache_key(self, name):
        return u'staticfiles:cache:%s' % name

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            return super(CachedFilesMixin, self).url(name)
        cache_key = self.cache_key(name)
        hashed_name = self.cache.get(cache_key)
        if hashed_name is None:
            hashed_name = self.hashed_name(name)
        return super(CachedFilesMixin, self).url(hashed_name)

    def url_converter(self, name):
        """
        Returns the custom URL converter for the given file name.
        """
        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs
            if url.startswith(('http', 'https')):
                return matched
            name_parts = name.split('/')
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, sub_level - 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(joined_result, force=True)
            # Return the hashed and normalized version to the file
            return 'url("%s")' % hashed_url

        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).
        """
        processed_files = []
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return processed_files

        # delete cache of all handled paths
        self.cache.delete_many([self.cache_key(path) for path in paths])

        # only try processing the files we have patterns for
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        processing_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths, key=path_level, reverse=True):

            # first get a hashed name for the given file
            hashed_name = self.hashed_name(name)

            original_file = self.open(name)
            try:
                # then get the original's file content
                content = original_file.read()

                # to apply each replacement pattern on the content
                if name in processing_paths:
                    converter = self.url_converter(name)
                    for patterns in self._patterns.values():
                        for pattern in patterns:
                            content = pattern.sub(converter, content)

                # then save the processed result
                if self.exists(hashed_name):
                    self.delete(hashed_name)

                saved_name = self._save(hashed_name, ContentFile(content))
                hashed_name = force_unicode(saved_name.replace('\\', '/'))
                processed_files.append(hashed_name)

                # and then set the cache accordingly
                self.cache.set(self.cache_key(name), hashed_name)
            finally:
                original_file.close()

        return processed_files
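
# Minimal sketch of how this mixin is typically wired into a concrete storage
# class (it mirrors Django's own CachedStaticFilesStorage); the import is the
# standard staticfiles one and is not defined in this snippet.
from django.contrib.staticfiles.storage import StaticFilesStorage


class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
    """Static file storage that rewrites url()/@import references to hashed names."""
    pass
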
Example #46
class NestedObjects(object):
    """
    A directed acyclic graph collection that exposes the add() API
    expected by Model._collect_sub_objects and can present its data as
    a nested list of objects.

    """
    def __init__(self):
        # Use object keys of the form (model, pk) because actual model
        # objects may not be unique

        # maps object key to list of child keys
        self.children = SortedDict()

        # maps object key to parent key
        self.parents = SortedDict()

        # maps object key to actual object
        self.seen = SortedDict()

    def add(self,
            model,
            pk,
            obj,
            parent_model=None,
            parent_obj=None,
            nullable=False):
        """
        Add item ``obj`` to the graph. Returns True (and does nothing)
        if the item has been seen already.

        The ``parent_obj`` argument must already exist in the graph; if
        not, it's ignored (but ``obj`` is still added with no
        parent). In any case, Model._collect_sub_objects (for whom
        this API exists) will never pass a parent that hasn't already
        been added itself.

        These restrictions in combination ensure the graph will remain
        acyclic (but can have multiple roots).

        ``model``, ``pk``, and ``parent_model`` arguments are ignored
        in favor of the appropriate lookups on ``obj`` and
        ``parent_obj``; unlike CollectedObjects, we can't maintain
        independence from the knowledge that we're operating on model
        instances, and we don't want to allow for inconsistency.

        ``nullable`` arg is ignored: it doesn't affect how the tree of
        collected objects should be nested for display.
        """
        model, pk = type(obj), obj._get_pk_val()

        # auto-created M2M models don't interest us
        if model._meta.auto_created:
            return True

        key = model, pk

        if key in self.seen:
            return True
        self.seen.setdefault(key, obj)

        if parent_obj is not None:
            parent_model, parent_pk = (type(parent_obj),
                                       parent_obj._get_pk_val())
            parent_key = (parent_model, parent_pk)
            if parent_key in self.seen:
                self.children.setdefault(parent_key, list()).append(key)
                self.parents.setdefault(key, parent_key)

    def _nested(self, key, format_callback=None, **kwargs):
        obj = self.seen[key]
        if format_callback:
            ret = [format_callback(obj, **kwargs)]
        else:
            ret = [obj]

        children = []
        for child in self.children.get(key, ()):
            children.extend(self._nested(child, format_callback, **kwargs))
        if children:
            ret.append(children)

        return ret

    def nested(self, format_callback=None, **kwargs):
        """
        Return the graph as a nested list.

        Passes **kwargs back to the format_callback as kwargs.

        """
        roots = []
        for key in self.seen.keys():
            if key not in self.parents:
                roots.extend(self._nested(key, format_callback, **kwargs))
        return roots
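
# Hedged usage sketch: ``Author`` and ``Book`` are hypothetical models (Book has
# a ForeignKey to Author). Model._collect_sub_objects would normally drive the
# add() calls; here they are issued by hand to show the resulting nesting.
def collect_author_tree(author):
    collector = NestedObjects()
    collector.add(type(author), author.pk, author)
    for book in author.book_set.all():
        collector.add(type(book), book.pk, book, parent_obj=author)
    # e.g. [<Author: a>, [<Book: b1>, <Book: b2>]]
    return collector.nested()
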
Example #47
    def compress(self, log=None, **options):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).
        """
        extensions = options.get('extensions')
        extensions = self.handle_extensions(extensions or ['html'])
        verbosity = int(options.get("verbosity", 0))
        if not log:
            log = StringIO()
        if not settings.TEMPLATE_LOADERS:
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings.")
        paths = set()
        for loader in self.get_loaders():
            try:
                module = import_module(loader.__module__)
                get_template_sources = getattr(module, 'get_template_sources',
                                               None)
                if get_template_sources is None:
                    get_template_sources = loader.get_template_sources
                paths.update(list(get_template_sources('')))
            except (ImportError, AttributeError):
                # Yeah, this didn't work out so well, let's move on
                pass
        if not paths:
            raise OfflineGenerationError("No template paths found. None of "
                                         "the configured template loaders "
                                         "provided template paths. See "
                                         "http://django.me/template-loaders "
                                         "for more information on template "
                                         "loaders.")
        if verbosity > 1:
            log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
        templates = set()
        for path in paths:
            for root, dirs, files in walk(path,
                                          followlinks=options.get(
                                              'followlinks', False)):
                templates.update(
                    os.path.join(root, name) for name in files
                    if not name.startswith('.') and any(
                        fnmatch(name, "*%s" % glob) for glob in extensions))
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity > 1:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        compressor_nodes = SortedDict()
        for template_name in templates:
            try:
                template_file = open(template_name)
                try:
                    template = Template(template_file.read().decode(
                        settings.FILE_CHARSET))
                finally:
                    template_file.close()
            except IOError:  # unreadable file -> ignore
                if verbosity > 0:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError:  # broken template -> try jinja -> ignore if still broken
                try:
                    template_file = open(template_name)
                    template = jinja_env.parse(template_file.read().decode(
                        settings.FILE_CHARSET))
                    template.is_jinja = True
                    template.name = template_name
                except jinja2.exceptions.TemplateSyntaxError:
                    if verbosity > 0:
                        log.write("Invalid template at: %s\n" % template_name)
                    continue
                finally:
                    template_file.close()
            except UnicodeDecodeError:
                if verbosity > 0:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
                continue  # skip undecodable templates instead of reusing a stale `template`
            if getattr(template, 'is_jinja', False):
                nodes = template.find_all(jinja2.nodes.CallBlock)
                for node in nodes:
                    try:
                        compress_node = node.call.node
                        if (compress_node.identifier ==
                                'compressor.contrib.jinja2ext.CompressorExtension'
                                and compress_node.name == '_compress'):
                            template.template_name = template_name
                            compressor_nodes.setdefault(template,
                                                        []).append(node)
                    except (AttributeError, IndexError):
                        pass
            else:
                nodes = list(self.walk_nodes(template))
                if nodes:
                    template.template_name = template_name
                    compressor_nodes.setdefault(template, []).extend(nodes)
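
# Sketch of the module-level Jinja2 environment the fallback above assumes;
# the exact construction is an assumption, only the extension dotted path is
# taken from the identifier check in the loop.
import jinja2
from compressor.contrib.jinja2ext import CompressorExtension

jinja_env = jinja2.Environment(extensions=[CompressorExtension])
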
Example #48
class FunctionPool(local):
    """
    A function pool that uses thread locals for storage. This is used to
    queue up functions to run once when necessary. This means that each
    thread has its own pool of messages.

    """
    def __iter__(self):
        """Return all queued functions."""
        if hasattr(self, '_thread_data'):
            for key, value in self._thread_data.iteritems():
                if key:
                    yield value
                else:
                    for item in value:
                        yield item

    def __len__(self):
        if hasattr(self, '_thread_data'):
            return len(self._thread_data)
        else:
            return 0

    def execute(self):
        """Execute all queued functions."""

        # Get all of the queued functions.
        functions = list(self)

        # Ensure the queue is cleared before running any functions.
        # This avoids triggering another post_commit signal, which would
        # execute this again, getting into an infinite loop.
        self.clear()

        # Run the functions.
        for func in functions:
            func()

    def queue(self, func, key=None):
        """
        Queues a function to call after the transaction is committed. Use a key
        when you want to ensure that an action won't get triggered multiple
        times.

        E.g. you might want to queue this function (lambda: sync_account(123))
        multiple times, but it only makes sense to run it once after a
        transaction is committed. In this case, you could use the key
        'sync_account.123' to ensure it only runs once.

        """

        if not hasattr(self, '_thread_data'):
            self._thread_data = SortedDict()

        if key:
            self._thread_data[key] = func
        else:
            self._thread_data.setdefault(None, [])
            self._thread_data[None].append(func)

    def clear(self):
        if hasattr(self, '_thread_data'):
            del self._thread_data
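
# Usage sketch (``pool``, ``sync_account`` and ``send_notification`` are
# illustrative names; wiring ``execute()`` to a post-commit signal is assumed
# rather than shown here):
def notify_committed():
    send_notification("committed")

pool = FunctionPool()
pool.queue(lambda: sync_account(123), key='sync_account.123')  # keyed: deduplicated
pool.queue(lambda: sync_account(123), key='sync_account.123')  # same key, same slot
pool.queue(notify_committed)                                   # unkeyed: always appended
pool.execute()  # runs the two distinct callables once, then clears the queue
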
Example #49
    def compress(self, log=None, **options):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).
        """
        extensions = options.get('extensions')
        extensions = self.handle_extensions(extensions or ['html'])
        verbosity = int(options.get("verbosity", 0))
        if not log:
            log = StringIO()
        if not settings.TEMPLATE_LOADERS:
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings.")
        paths = set()
        for loader in self.get_loaders():
            try:
                module = import_module(loader.__module__)
                get_template_sources = getattr(module, 'get_template_sources',
                                               None)
                if get_template_sources is None:
                    get_template_sources = loader.get_template_sources
                paths.update(list(get_template_sources('')))
            except (ImportError, AttributeError):
                # Yeah, this didn't work out so well, let's move on
                pass
        if not paths:
            raise OfflineGenerationError("No template paths found. None of "
                                         "the configured template loaders "
                                         "provided template paths. See "
                                         "http://django.me/template-loaders "
                                         "for more information on template "
                                         "loaders.")
        if verbosity > 1:
            log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
        templates = set()
        for path in paths:
            for root, dirs, files in os.walk(path,
                                             followlinks=options.get(
                                                 'followlinks', False)):
                templates.update(
                    os.path.join(root, name) for name in files
                    if not name.startswith('.') and any(
                        fnmatch(name, "*%s" % glob) for glob in extensions))
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity > 1:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        engine = options.get("engine", "django")
        parser = self.__get_parser(engine)

        compressor_nodes = SortedDict()
        for template_name in templates:
            try:
                template = parser.parse(template_name)
            except IOError:  # unreadable file -> ignore
                if verbosity > 0:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError as e:  # broken template -> ignore
                if verbosity > 0:
                    log.write("Invalid template %s: %s\n" % (template_name, e))
                continue
            except TemplateDoesNotExist:  # non existent template -> ignore
                if verbosity > 0:
                    log.write("Non-existent template at: %s\n" % template_name)
                continue
            except UnicodeDecodeError:
                if verbosity > 0:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
                continue  # skip undecodable templates instead of reusing a stale `template`
            try:
                nodes = list(parser.walk_nodes(template))
            except (TemplateDoesNotExist, TemplateSyntaxError) as e:
                # Could be an error in some base template
                if verbosity > 0:
                    log.write("Error parsing template %s: %s\n" %
                              (template_name, e))
                continue
            if nodes:
                template.template_name = template_name
                compressor_nodes.setdefault(template, []).extend(nodes)

        if not compressor_nodes:
            raise OfflineGenerationError(
                "No 'compress' template tags found in templates."
                "Try running compress command with --follow-links and/or"
                "--extension=EXTENSIONS")

        if verbosity > 0:
            log.write("Found 'compress' tags in:\n\t" +
                      "\n\t".join((t.template_name
                                   for t in compressor_nodes.keys())) + "\n")

        log.write("Compressing... ")
        count = 0
        results = []
        offline_manifest = SortedDict()
        init_context = parser.get_init_context(
            settings.COMPRESS_OFFLINE_CONTEXT)

        for template, nodes in compressor_nodes.items():
            context = Context(init_context)
            template._log = log
            template._log_verbosity = verbosity

            if not parser.process_template(template, context):
                continue

            for node in nodes:
                context.push()
                parser.process_node(template, context, node)
                rendered = parser.render_nodelist(template, context, node)
                key = get_offline_hexdigest(rendered)

                if key in offline_manifest:
                    continue

                try:
                    result = parser.render_node(template, context, node)
                except Exception as e:
                    raise CommandError("An error occured during rendering %s: "
                                       "%s" % (template.template_name, e))
                offline_manifest[key] = result
                context.pop()
                results.append(result)
                count += 1

        write_offline_manifest(offline_manifest)

        log.write("done\nCompressed %d block(s) from %d template(s).\n" %
                  (count, len(compressor_nodes)))
        return count, results
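
# Sketch of the django-compressor settings this command reads (the values are
# illustrative); COMPRESS_OFFLINE_CONTEXT is what ends up in the Context built
# above for every template.
COMPRESS_OFFLINE = True
COMPRESS_OFFLINE_CONTEXT = {
    'STATIC_URL': '/static/',  # plus any variables your {% compress %} blocks need
}
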
Example #50
class CachedFilesMixin(object):
    default_template = """url("%s")"""
    patterns = (("*.css", (
        r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
        (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
    )), )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache('staticfiles')
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append(
                    (compiled, template))

    def file_hash(self, name, content=None):
        """
        Returns a hash of the file with the given name and optional content.
        """
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None):
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        opened = False
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths and fragments
                return name
            opened = True
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        if file_hash is not None:
            file_hash = ".%s" % file_hash
        hashed_name = os.path.join(path, "%s%s%s" % (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)

    def cache_key(self, name):
        return 'staticfiles:%s' % hashlib.md5(smart_bytes(name)).hexdigest()

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'):  # don't hash paths
                hashed_name = name
            else:
                cache_key = self.cache_key(name)
                hashed_name = self.cache.get(cache_key)
                if hashed_name is None:
                    hashed_name = self.hashed_name(clean_name).replace(
                        '\\', '/')
                    # set the cache if there was a miss
                    # (e.g. if cache server goes down)
                    self.cache.set(cache_key, hashed_name)

        final_url = super(CachedFilesMixin, self).url(hashed_name)

        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name  # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)

        return unquote(final_url)

    def url_converter(self, name, template=None):
        """
        Returns the custom URL converter for the given file name.
        """
        if template is None:
            template = self.default_template

        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split('/')[-1:]
            relative_url = '/'.join(url.split('/')[:-1] + file_name)

            # Return the hashed version to the file
            return template % unquote(relative_url)

        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # where to store the new paths
        hashed_paths = {}

        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(
                        settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            content = pattern.sub(converter, content)
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(smart_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(saved_name.replace('\\', '/'))

                # and then set the cache accordingly
                hashed_paths[self.cache_key(name.replace('\\',
                                                         '/'))] = hashed_name
                yield name, hashed_name, processed

        # Finally set the cache
        self.cache.set_many(hashed_paths)
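
# Standalone illustration of the first CSS pattern defined on the class above;
# it only shows what the regex captures, independent of any storage instance.
import re

css_url_pattern = re.compile(r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""")
match = css_url_pattern.search('background: url("../img/logo.png");')
# match.group(1) == 'url("../img/logo.png")'
# match.group(2) == '../img/logo.png'  -> handed to url_converter() as ``url``
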
Example #51
def codingjob_export_options(request, project):
    if request.GET.get("use_session"):
        jobs = json.loads(request.session['export_job_ids'])
    else:
        jobs = request.GET.getlist("codingjobs")
    level = int(request.GET["export_level"])
    form = GetCodingJobResults.options_form(request.POST or None,
                                            project=project,
                                            codingjobs=jobs,
                                            export_level=level,
                                            initial=dict(codingjobs=jobs,
                                                         export_level=level))

    sections = SortedDict()  # section : [(id, field, subfields) ..]
    subfields = {}  # fieldname -> subfields reference

    for name in form.fields:
        if form[name].is_hidden:
            continue
        prefix = name.split("_")[0]
        section = {
            "schemafield": "Field options",
            "meta": "Metadata options"
        }.get(prefix, "General options")

        if prefix == "schemafield" and not name.endswith("_included"):
            continue
        subfields[name] = []
        sections.setdefault(section, []).append(
            (name, form[name], subfields[name]))
        form[name].subfields = []

    # sort coding fields
    codingfields = sorted(sections["Field options"])
    sections["Field options"].sort()

    for name in form.fields:  # add subordinate fields
        prefix = name.split("_")[0]
        if prefix == "schemafield" and not name.endswith("_included"):
            subfields[name.rsplit("_", 1)[0] + "_included"].append(
                (name, form[name]))

    for flds in subfields.values():
        flds.sort()

    if form.is_valid():
        results = GetCodingJobResults(form).run()

        eformat = {f.label: f
                   for f in EXPORT_FORMATS}[form.cleaned_data["export_format"]]

        if eformat.mimetype is not None:
            if len(jobs) > 3:
                jobs = jobs[:3] + ["etc"]
            filename = "Codingjobs {j} {now}.{ext}".format(
                j=",".join(str(j) for j in jobs),
                now=datetime.datetime.now(),
                ext=eformat.label)
            response = HttpResponse(content_type=eformat.mimetype, status=200)
            response[
                'Content-Disposition'] = 'attachment; filename="{filename}"'.format(
                    **locals())
            response.write(results)
            return response

    return render(request, 'navigator/project/export_options.html', locals())
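
# Illustrative shape of the ``sections`` mapping handed to the template above
# (the field names are made up; real keys come from the options form):
#
#   sections = SortedDict([
#       ("General options",  [("export_format", <BoundField>, [])]),
#       ("Field options",    [("schemafield_1_included", <BoundField>,
#                              [("schemafield_1_code", <BoundField>)])]),
#       ("Metadata options", [("meta_date_included", <BoundField>, [])]),
#   ])
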
Example #52
    def compress(self, log=None, **options):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).
        """
        extensions = options.get('extensions')
        extensions = self.handle_extensions(extensions or ['html'])
        verbosity = int(options.get("verbosity", 0))
        if not log:
            log = StringIO()
        if not settings.TEMPLATE_LOADERS:
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings.")
        paths = set()
        for loader in self.get_loaders():
            try:
                module = import_module(loader.__module__)
                get_template_sources = getattr(module,
                    'get_template_sources', None)
                if get_template_sources is None:
                    get_template_sources = loader.get_template_sources
                paths.update(list(get_template_sources('')))
            except (ImportError, AttributeError):
                # Yeah, this didn't work out so well, let's move on
                pass
        if not paths:
            raise OfflineGenerationError("No template paths found. None of "
                                         "the configured template loaders "
                                         "provided template paths. See "
                                         "http://django.me/template-loaders "
                                         "for more information on template "
                                         "loaders.")
        if verbosity > 1:
            log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
        templates = set()
        for path in paths:
            for root, dirs, files in walk(path,
                    followlinks=options.get('followlinks', False)):
                templates.update(os.path.join(root, name)
                    for name in files if not name.startswith('.') and
                        any(fnmatch(name, "*%s" % glob) for glob in extensions))
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity > 1:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        compressor_nodes = SortedDict()
        for template_name in templates:
            try:
                template_file = open(template_name)
                try:
                    template = Template(template_file.read().decode(
                                        settings.FILE_CHARSET))
                finally:
                    template_file.close()
            except IOError: # unreadable file -> ignore
                if verbosity > 0:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError: # broken template -> ignore
                if verbosity > 0:
                    log.write("Invalid template at: %s\n" % template_name)
                continue
            except UnicodeDecodeError:
                if verbosity > 0:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
                continue  # skip undecodable templates instead of reusing a stale `template`
            nodes = list(self.walk_nodes(template))
            if nodes:
                compressor_nodes.setdefault(template_name, []).extend(nodes)

        if not compressor_nodes:
            raise OfflineGenerationError(
                "No 'compress' template tags found in templates.")

        if verbosity > 0:
            log.write("Found 'compress' tags in:\n\t" +
                      "\n\t".join(compressor_nodes.keys()) + "\n")

        log.write("Compressing... ")
        count = 0
        results = []
        context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
        for nodes in compressor_nodes.values():
            for node in nodes:
                key = get_offline_cachekey(node.nodelist)
                try:
                    result = node.render(context, forced=True)
                except Exception, e:
                    raise CommandError("An error occured during rending: "
                                       "%s" % e)
                cache.set(key, result, settings.COMPRESS_OFFLINE_TIMEOUT)
                results.append(result)
                count += 1
Example #53
    def export(self, response, all=False):
        courses_with_results = list()
        for course in self.semester.course_set.filter(state="published").all():
            results = SortedDict()
            for questionnaire, contributor, data, grade in calculate_results(course):
                results.setdefault(questionnaire.id, []).append((contributor, data, grade))
            courses_with_results.append((course, results))

        courses_with_results.sort(key=lambda cr: cr[0].kind)

        qn_frequencies = defaultdict(int)
        for course, results in courses_with_results:
            # keys are questionnaire ids; avoid shadowing ``results``
            for questionnaire_id, result_list in results.items():
                qn_frequencies[questionnaire_id] += 1

        qn_relevant = qn_frequencies.items()
        qn_relevant.sort(key=lambda t: -t[1])

        questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]

        self.workbook = xlwt.Workbook()
        self.sheet = self.workbook.add_sheet(_(u"Results"))
        self.row = 0
        self.col = 0

        # Adding evaP colors to palette
        xlwt.add_palette_colour("custom_dark_green", 0x20)
        self.workbook.set_colour_RGB(0x20, 120, 241, 89)
        xlwt.add_palette_colour("custom_light_green", 0x21)
        self.workbook.set_colour_RGB(0x21, 188, 241, 89)
        xlwt.add_palette_colour("custom_yellow", 0x22)
        self.workbook.set_colour_RGB(0x22, 241, 226, 89)
        xlwt.add_palette_colour("custom_orange", 0x23)
        self.workbook.set_colour_RGB(0x23, 241, 158, 89)
        xlwt.add_palette_colour("custom_red", 0x24)
        self.workbook.set_colour_RGB(0x24, 241, 89, 89)

        # formatting for average grades
        avg_style = xlwt.easyxf('alignment: horiz centre; font: bold on; borders: left medium, top medium, bottom medium')
        avg_style_very_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_dark_green; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
        avg_style_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_light_green; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
        avg_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour custom_yellow; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
        avg_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_orange; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
        avg_style_very_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_red; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")

        # formatting for variances
        var_style_good = xlwt.easyxf('alignment: horiz centre; borders: right medium', num_format_str="0.0")
        var_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour gray25; alignment: horiz centre; borders: right medium', num_format_str="0.0")
        var_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour gray40; alignment: horiz centre; borders: right medium', num_format_str="0.0")

        # formatting for overall grades
        over_style_very_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_dark_green; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
        over_style_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_light_green; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
        over_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour custom_yellow; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
        over_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_orange; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
        over_style_very_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_red; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")

        # formatting for special fields
        headline_style = xlwt.easyxf('font: bold on, height 400; alignment: horiz centre, vert centre, wrap on', num_format_str="0.0")
        course_style = xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium')
        course_unfinished_style = xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium; font: italic on')
        total_answers_style = xlwt.easyxf('alignment: horiz centre; borders: left medium, bottom medium, right medium')

        # general formatting
        bold_style = xlwt.easyxf('font: bold on')
        border_left_style = xlwt.easyxf('borders: left medium')
        border_right_style = xlwt.easyxf('borders: right medium')
        border_top_bottom_right_style = xlwt.easyxf('borders: top medium, bottom medium, right medium')

        self.writec(_(u"Evaluation {0} - created on {1}").format(self.semester.name, datetime.date.today()), headline_style)
        for course, results in courses_with_results:
            if course.state == "published":
                self.writec(course.name, course_style, cols=2)
            else:
                self.writec(course.name, course_unfinished_style, cols=2)

        self.writen()
        for course, results in courses_with_results:
            self.writec("Average", avg_style)
            self.writec("Variance", border_top_bottom_right_style)

        for questionnaire in questionnaires:
            self.writen(questionnaire.name, bold_style)
            for course, results in courses_with_results:
                self.writec(None, border_left_style)
                self.writec(None, border_right_style)

            for question_index, question in enumerate(questionnaire.question_set.all()):
                if question.is_text_question():
                    continue

                self.writen(question.text)

                for course, results in courses_with_results:
                    qn_results = results.get(questionnaire.id, None)
                    if qn_results:
                        values = []
                        variances = []
                        enough_answers = True
                        for contributor, data, grade in qn_results:
                            for grade_result in data:
                                if grade_result.question.id == question.id:
                                    if grade_result.average:
                                        values.append(grade_result.average)
                                        variances.append(grade_result.variance)
                                        if not grade_result.show:
                                            enough_answers = False
                                    break
                        if values and (enough_answers or all):
                            avg = sum(values) / len(values)
                            if avg < 1.5:
                                self.writec(avg, avg_style_very_good)
                            elif avg < 2.5:
                                self.writec(avg, avg_style_good)
                            elif avg < 3.5:
                                self.writec(avg, avg_style_medium)
                            elif avg < 4.5:
                                self.writec(avg, avg_style_bad)
                            else:
                                self.writec(avg, avg_style_very_bad)

                            var = sum(variances) / len(variances)
                            if var < 0.5:
                                self.writec(var, var_style_good)
                            elif var < 1:
                                self.writec(var, var_style_medium)
                            else:
                                self.writec(var, var_style_bad)
                        else:
                            self.writec(None, border_left_style)
                            self.writec(None, border_right_style)
                    else:
                        self.writec(None, border_left_style)
                        self.writec(None, border_right_style)
            self.writen(None)
            for course, results in courses_with_results:
                self.writec(None, border_left_style)
                self.writec(None, border_right_style)

        self.writen(_(u"Overall Grade"), bold_style)
        for course, results in courses_with_results:
            avg = calculate_average_grade(course)
            if avg:
                if avg < 1.5:
                    self.writec(avg, over_style_very_good, cols=2)
                elif avg < 2.5:
                    self.writec(avg, over_style_good, cols=2)
                elif avg < 3.5:
                    self.writec(avg, over_style_medium, cols=2)
                elif avg < 4.5:
                    self.writec(avg, over_style_bad, cols=2)
                else:
                    self.writec(avg, over_style_very_bad, cols=2)
            else:
                self.writec(None, border_left_style)
                self.writec(None, border_right_style)

        self.writen(_(u"Total Answers"), bold_style)
        for course, results in courses_with_results:
            self.writec(course.num_voters, total_answers_style, cols=2)

        self.workbook.save(response)
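
# Hedged usage sketch: the surrounding exporter class is assumed to be
# constructed with a semester (``self.semester`` is used above); ``ExcelExporter``,
# ``Semester`` and the view wiring are illustrative names, not taken from this file.
from django.http import HttpResponse

def semester_results_excel(request, semester_id):
    semester = Semester.objects.get(pk=semester_id)
    response = HttpResponse(content_type="application/vnd.ms-excel")
    response["Content-Disposition"] = 'attachment; filename="evaluation.xls"'
    ExcelExporter(semester).export(response, all=False)
    return response
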
Example #54
class CachedFilesMixin(object):
    default_template = """url("%s")"""
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
        )),
    )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache('mediafiles')
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append((compiled, template))

    def file_hash(self, name, content=None):
        """
        Returns a hash of the file with the given name and optional content.
        """
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None):
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        opened = False
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths and fragments
                return name
            opened = True
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        if file_hash is not None:
            file_hash = ".%s" % file_hash
        hashed_name = os.path.join(path, "%s%s%s" %
                                   (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
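        # Illustrative example (assumed values): for name "css/font.eot?#iefix"
        # the hashed path becomes e.g. "css/font.abc123def456.eot"; appending a
        # bare "?" below lets urlunsplit() reproduce the "?#iefix" suffix
        # instead of collapsing it to "#iefix".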
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)

    def cache_key(self, name):
        return 'mediafiles:%s' % hashlib.md5(force_bytes(name)).hexdigest()

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'):  # don't hash paths
                hashed_name = name
            else:
                cache_key = self.cache_key(name)
                hashed_name = self.cache.get(cache_key)
                if hashed_name is None:
                    hashed_name = self.hashed_name(clean_name).replace('\\', '/')
                    # set the cache if there was a miss
                    # (e.g. if cache server goes down)
                    self.cache.set(cache_key, hashed_name)

        final_url = super(CachedFilesMixin, self).url(hashed_name)

        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name  # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)

        return unquote(final_url)

    def url_converter(self, name, template=None):
        """
        Returns the custom URL converter for the given file name.
        """
        if template is None:
            template = self.default_template

        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split('/')[-1:]
            relative_url = '/'.join(url.split('/')[:-1] + file_name)

            # Return the hashed version to the file
            return template % unquote(relative_url)

        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given SortedDict of files (called from collectmedia).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return

        # where to store the new paths
        hashed_paths = {}

        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):

            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:

                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)

                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)

                hashed_file_exists = self.exists(hashed_name)
                processed = False

                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            content = pattern.sub(converter, content)
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(saved_name.replace('\\', '/'))

                # and then set the cache accordingly
                hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
                yield name, hashed_name, processed

        # Finally set the cache
        self.cache.set_many(hashed_paths)
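
# Minimal usage sketch (assumed, not from the excerpt above): like Django's
# CachedStaticFilesStorage, the mixin is meant to be combined with a concrete
# storage backend via multiple inheritance. CachedMediaFilesStorage is a
# hypothetical name for such a combination.
from django.core.files.storage import FileSystemStorage

class CachedMediaFilesStorage(CachedFilesMixin, FileSystemStorage):
    """File system storage that serves content-hashed (cache-busting) names."""
    pass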
Example #55
class CachedFilesMixin(object):
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            r"""(@import\s*["']\s*(.*?)["'])""",
        )),
    )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            self.cache = get_cache('staticfiles')
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        for extension, patterns in self.patterns:
            for pattern in patterns:
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append(compiled)

    def hashed_name(self, name, content=None):
        if content is None:
            if not self.exists(name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (name, self))
            try:
                content = self.open(name)
            except IOError:
                # Handle directory paths
                return name
        path, filename = os.path.split(name)
        root, ext = os.path.splitext(filename)
        # Get the MD5 hash of the file
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        md5sum = md5.hexdigest()[:12]
        return os.path.join(path, u"%s.%s%s" % (root, md5sum, ext))

    def cache_key(self, name):
        return u'staticfiles:cache:%s' % name

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode.
        """
        if settings.DEBUG and not force:
            return super(CachedFilesMixin, self).url(name)
        cache_key = self.cache_key(name)
        hashed_name = self.cache.get(cache_key)
        if hashed_name is None:
            hashed_name = self.hashed_name(name)
        return super(CachedFilesMixin, self).url(hashed_name)

    def url_converter(self, name):
        """
        Returns the custom URL converter for the given file name.
        """
        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs
            if url.startswith(('http', 'https')):
                return matched
            name_parts = name.split('/')
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, sub_level - 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(joined_result, force=True)
            # Return the hashed and normalized version to the file
            return 'url("%s")' % hashed_url
        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).
        """
        processed_files = []
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return processed_files

        # delete cache of all handled paths
        self.cache.delete_many([self.cache_key(path) for path in paths])

        # only try processing the files we have patterns for
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        processing_paths = [path for path in paths if matches(path)]

        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths, key=path_level, reverse=True):

            # first get a hashed name for the given file
            hashed_name = self.hashed_name(name)

            with self.open(name) as original_file:
                # then get the original's file content
                content = original_file.read()

                # to apply each replacement pattern on the content
                if name in processing_paths:
                    converter = self.url_converter(name)
                    for patterns in self._patterns.values():
                        for pattern in patterns:
                            content = pattern.sub(converter, content)

                # then save the processed result
                if self.exists(hashed_name):
                    self.delete(hashed_name)

                saved_name = self._save(hashed_name, ContentFile(content))
                hashed_name = force_unicode(saved_name.replace('\\', '/'))
                processed_files.append(hashed_name)

                # and then set the cache accordingly
                self.cache.set(self.cache_key(name), hashed_name)

        return processed_files
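
# Minimal usage sketch (assumed, not part of the excerpt): in Django's
# staticfiles app this mixin is combined with StaticFilesStorage and selected
# through the STATICFILES_STORAGE setting, roughly as follows; the dotted
# settings path is illustrative.
from django.contrib.staticfiles.storage import StaticFilesStorage

class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
    pass

# settings.py:
# STATICFILES_STORAGE = 'myproject.storage.CachedStaticFilesStorage'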