Example #1
def stats(request, service_id):
    """Compute some stats."""

    service = get_object_or_404(UserService, pk=service_id)

    if check_is_service_id(service, MODULE_NAME):
        shares = get_items(request.user, date.today() - timedelta(days=7), service)
        sources = {}

        # count source websites
        for share in shares:
            if share.source in sources:
                sources[share.source] += 1
            else:
                sources[share.source] = 1

        sources = SortedDict(sorted(sources.items(),
                                    reverse=True, key=lambda x: x[1]))
        sources_reversed = SortedDict(sorted(sources.items(),
                                    reverse=False, key=lambda x: x[1]))

        return render(
            request,
            {'shares' : shares,
             'sources' : sources,
             'sources_reversed' : sources_reversed},
            service.template_name + '/stats.html'
        )
    else:
        return redirect('/%s' % request.user.username)
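Note: `dict.has_key()` is gone in Python 3, and Django's `SortedDict` was deprecated in 1.7 and removed in 1.9. A minimal Python 3 sketch of the source-counting step above (`count_sources` is a hypothetical helper; it assumes `shares` is any iterable of objects with a `source` attribute):

from collections import Counter, OrderedDict

def count_sources(shares):
    # Tally shares per source site; most_common() already sorts by count, descending.
    counts = Counter(share.source for share in shares)
    return OrderedDict(counts.most_common())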
Example #2
    def _html_output(self, form_as, normal_row, help_text_html, sections_re, row_re):
        formsets = SortedDict()
        for bf in self.formsets:
            if bf.label:
                label = conditional_escape(force_unicode(bf.label))
                # Only add the suffix if the label does not end in
                # punctuation.
                if self.form.label_suffix:
                    if label[-1] not in ":?.!":
                        label += self.form.label_suffix
                label = label or ""
            else:
                label = ""
            if bf.field.help_text:
                help_text = help_text_html % force_unicode(bf.field.help_text)
            else:
                help_text = u""
            formsets[bf.name] = {"label": force_unicode(label), "field": unicode(bf), "help_text": help_text}

        try:
            output = []
            data = form_as()
            section_search = sections_re.search(data)
            if formsets:
                hidden = u"".join(hidden_re.findall(data))
                last_formset_name, last_formset = formsets.items()[-1]
                last_formset["field"] = last_formset["field"] + hidden
                formsets[last_formset_name] = normal_row % last_formset
                for name, formset in formsets.items()[:-1]:
                    formsets[name] = normal_row % formset

            if not section_search:
                output.append(data)
            else:
                section_groups = section_search.groups()
                for row, head, item in row_re.findall(section_groups[1]):
                    head_search = label_re.search(head)
                    if head_search:
                        id = head_search.groups()[1]
                        if id in formsets:
                            row = formsets[id]
                            del formsets[id]
                    output.append(row)

            for name, row in formsets.items():
                if name in self.form.fields.keyOrder:
                    output.append(row)

            return mark_safe(u"\n".join(output))
        except Exception, e:
            import traceback

            return traceback.format_exc()
Example #3
def date_tree():
    """ Returns a list of (year, months) pairs, e.g. [(2011, [10, 11, 12]), (2012, [1, 2, 3, 4, 5, 6])]. """
    today = datetime.date.today()
    first_day = date_of_first_entry()
    years = range(first_day.year, today.year+1)
    date_tree = SortedDict((year, range(1,13)) for year in years)
    for year, months in date_tree.items():
        if year == first_day.year:
            date_tree[year] = months[(first_day.month-1):]
        if year == today.year:
            # the months list may already have been trimmed above if this is also the first year.
            months_this_year = range(1, today.month+1)
            date_tree[year] = [m for m in date_tree[year] if m in months_this_year]
    return date_tree.items()
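On Python 3, `range()` is lazy, so the slicing above would need `list(range(...))`, and a plain `dict` (insertion-ordered since CPython 3.7) can stand in for `SortedDict`. A hedged sketch with `first_day` passed in rather than read from `date_of_first_entry()`:

import datetime

def date_tree(first_day, today=None):
    # Map each year to its active months, trimming the first and current year.
    today = today or datetime.date.today()
    tree = {year: list(range(1, 13)) for year in range(first_day.year, today.year + 1)}
    tree[first_day.year] = [m for m in tree[first_day.year] if m >= first_day.month]
    tree[today.year] = [m for m in tree[today.year] if m <= today.month]
    return list(tree.items())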
Example #4
    def get_extra_context(self, *args, **kwargs):
        dls = list(Download.permitted.all())

        # create dictionary of categories
        cat_dict = SortedDict((id, {'parent': parent,
                                    'title': title,
                                    'items': [],
                                    'subcats': [],
                                    'slug': slug,
                                    'child_count': 0})
                              for (id, parent, title, slug) in
                              Category.objects.values_list('id', 'parent',
                                                           'title', 'slug'))
        # add None key for downloads without a category
        cat_dict[None] = {
            'parent': None,
            'items': [],
            'child_count': 0,
            'subcats': []
        }

        # add downloads to category item lists
        for dl in dls:
            cat_dict[dl.primary_category_id]['items'].append(dl)

        # nest subcategories
        for key, val in cat_dict.items():
            if val['parent']:
                cat_dict[val['parent']]['subcats'].append(key)
            child_count = len(val['items'])
            val['child_count'] += child_count
            if child_count > 0:
                parent_id = val['parent']
                while parent_id:
                    cat_dict[parent_id]['child_count'] += child_count
                    parent_id = cat_dict[parent_id]['parent']

        # remove categories with no items in them
        category_list = []
        for key, val in cat_dict.items():
            subcats = []
            for subcat in val['subcats']:
                if cat_dict[subcat]['child_count'] > 0:
                    subcats.append(cat_dict[subcat])
            val['subcats'] = subcats
            if val['child_count'] > 0 and not val['parent']:
                category_list.append(val)

        return {'title': _('Downloads'), 'category_list': category_list}
Example #5
    def get_context_data(self, **kwargs):
        context_data = super(PageVisit, self).get_context_data(**kwargs)
        if not self.client:
            return context_data
        query = models.RequestEvent.objects.filter(client=self.client, created__range=[self.start_date, self.end_date])
        start_page = self.form.cleaned_data.get("start_page")
        if start_page:
            query = query.filter(path=start_page)
        try:
            target_page = query.values("path").annotate(visits=Count("pk")).order_by("-visits")[0]
        except IndexError:
            return context_data

        parent_pages = defaultdict(int)
        for parent in models.RequestEvent.objects.filter(
            created__range=[self.start_date, self.end_date], path=target_page["path"]
        ):
            key = ""
            if parent.referrer:
                parsed_url = urlparse(parent.referrer)
                key = "%s://%s%s" % (parsed_url.scheme, parsed_url.netloc, parsed_url.path)
            parent_pages[key] += 1
        parent_pages = SortedDict(sorted(parent_pages.items(), key=lambda x: x[1], reverse=True))
        child_pages = (
            models.RequestEvent.objects.filter(
                created__range=[self.start_date, self.end_date], referrer__endswith=target_page["path"]
            )
            .values("path")
            .annotate(visits=Count("pk"))
            .order_by("-visits")
        )
        page_info = {"target_page": target_page, "parent_pages": parent_pages, "child_pages": child_pages}
        context_data.update({"page_info": page_info, "start_page": start_page or target_page["path"]})
        return context_data
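The parent-page tally normalizes each referrer to scheme://host/path before counting. A standalone Python 3 sketch of just that step (`tally_parent_pages` is hypothetical; it assumes an iterable of raw referrer strings, possibly empty):

from collections import Counter
from urllib.parse import urlparse

def tally_parent_pages(referrers):
    # Normalize to scheme://host/path, then count, busiest first.
    counts = Counter()
    for ref in referrers:
        key = ""
        if ref:
            parsed = urlparse(ref)
            key = "%s://%s%s" % (parsed.scheme, parsed.netloc, parsed.path)
        counts[key] += 1
    return dict(counts.most_common())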
Example #6
    def get_actions(self):
        labels, details = self._review_actions()

        actions = SortedDict()
        if self.review_type != "preliminary":
            actions["public"] = {
                "method": self.handler.process_public,
                "minimal": False,
                "label": _lazy("Push to public"),
            }

        actions["prelim"] = {"method": self.handler.process_preliminary, "label": labels["prelim"], "minimal": False}
        actions["reject"] = {"method": self.handler.process_sandbox, "label": _lazy("Reject"), "minimal": False}
        actions["info"] = {
            "method": self.handler.request_information,
            "label": _lazy("Request more information"),
            "minimal": True,
        }
        actions["super"] = {
            "method": self.handler.process_super_review,
            "label": _lazy("Request super-review"),
            "minimal": True,
        }
        for k, v in actions.items():
            v["details"] = details.get(k)

        return actions
Example #7
    def __new__(cls, clsname, bases, attrs):
        fields_present = set()
        fieldsets = SortedDict()
        fieldset_help = {}

        fieldset_sets = []
        if 'fieldsets' in attrs:
            fieldset_sets.append(attrs['fieldsets'])
        fieldset_sets += [clazz.fieldsets for clazz in bases if hasattr(clazz, 'fieldsets')]
        for fieldset in fieldset_sets:
            for values in fieldset:
                if len(values) == 3:
                    name, help_text, fields = values
                    fieldset_help[name] = fieldset_help.get(name, help_text)
                else:
                    name, fields = values
                fields = [f for f in fields if f not in fields_present]
                fields_present.update(fields)
                if not fields:
                    continue

                if name in fieldsets:
                    fieldsets[name] = fieldsets[name] + fields
                else:
                    fieldsets[name] = fields

        if None in fieldsets:
            del fieldsets[None]

        attrs['fieldsets'] = fieldsets.items()
        attrs['fieldset_help'] = fieldset_help

        return super(FieldsetMetaclass, cls).__new__(cls, clsname, bases, attrs)
Example #8
class BindingDict(object):
    """
    This dict-like object is used to store fields on a serializer.

    This ensures that whenever fields are added to the serializer we call
    `field.bind()` so that the `field_name` and `parent` attributes
    can be set correctly.
    """
    def __init__(self, serializer):
        self.serializer = serializer
        self.fields = SortedDict()

    def __setitem__(self, key, field):
        self.fields[key] = field
        field.bind(field_name=key, parent=self.serializer)

    def __getitem__(self, key):
        return self.fields[key]

    def __delitem__(self, key):
        del self.fields[key]

    def items(self):
        return self.fields.items()

    def keys(self):
        return self.fields.keys()

    def values(self):
        return self.fields.values()
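The value of the wrapper is that `__setitem__` is the single choke point where binding happens, so a field can never end up in `fields` unbound. A hedged usage sketch; `DemoField` and `DemoSerializer` are hypothetical stand-ins, and `BindingDict` is the class above:

class DemoField(object):
    def bind(self, field_name, parent):
        # Called automatically by BindingDict.__setitem__.
        self.field_name = field_name
        self.parent = parent

class DemoSerializer(object):
    def __init__(self):
        self.fields = BindingDict(self)

serializer = DemoSerializer()
serializer.fields['title'] = DemoField()
assert serializer.fields['title'].field_name == 'title'
assert serializer.fields['title'].parent is serializer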
Example #9
    def get_stats_items(self, since):
        """Stubbed out for now"""

        checkins = self.get_items(since)

        categories = {}
        
        mayorships = {}
        
        for checkin in checkins:

            if hasattr(checkin, 'is_mayor'):
                if checkin.title in mayorships:
                    mayorships[checkin.title]['count'] += 1
                else:
                    mayorships[checkin.title] = {'venue' : checkin,
                                                 'count' : 1
                                                 }
                    
            if hasattr(checkin, 'categories'):
                if checkin.categories[0]['name'] in categories:
                    categories[checkin.categories[0]['name']]['count'] += 1
                else:
                    if hasattr(checkin, 'icon'):
                        icon = checkin.icon
                    else:
                        icon = checkin.categories[0]['name']
                    categories[checkin.categories[0]['name']] = {
                        'icon' : icon,
                        'count' : 1
                        }
                    
        categories = SortedDict(sorted(categories.items(), reverse=True, key=lambda x: x[1]['count']))
                    
        return checkins, categories, mayorships
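Both tallies follow the same check-then-initialize pattern, which `collections.defaultdict` expresses more directly. A hedged sketch of the mayorship count (`count_mayorships` is hypothetical; it assumes checkins expose `is_mayor` and `title` as above):

from collections import defaultdict

def count_mayorships(checkins):
    # venue title -> {'venue': first checkin seen, 'count': n}
    mayorships = defaultdict(lambda: {'venue': None, 'count': 0})
    for checkin in checkins:
        if hasattr(checkin, 'is_mayor'):
            entry = mayorships[checkin.title]
            entry['venue'] = entry['venue'] or checkin
            entry['count'] += 1
    return dict(mayorships)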
Example #10
 def js_options(self):
     options = deepcopy(self._meta.options)
     columns = self.bound_columns
     aoColumnDefs = options.setdefault('aoColumnDefs', [])
     colopts = SortedDict()
     for index, name in enumerate(columns.keys()):
         column = columns[name]
         for key, value in column.options.items():
             if (key, str(value)) not in colopts:
                 colopts[(key, str(value))] = {}
                 colopts[(key, str(value))]['targets'] = []
             colopts[(key, str(value))]['targets'] = colopts[(key, str(value))]['targets'] + [index]
             colopts[(key, str(value))]['key'] = key
             colopts[(key, str(value))]['value'] = value
         if column.sort_field != column.display_field and column.sort_field in columns:
             key = 'iDataSort'
             value = columns.keys().index(column.sort_field)
             if (key, str(value)) not in colopts:
                 colopts[(key, str(value))] = {}
                 colopts[(key, str(value))]['targets'] = []
             colopts[(key, str(value))]['targets'] = colopts[(key, str(value))]['targets'] + [index]
             colopts[(key, str(value))]['key'] = key
             colopts[(key, str(value))]['value'] = value
     for kv, values in colopts.items():
         aoColumnDefs.append(dict([(values['key'], values['value']), ('aTargets', values['targets'])]))
     options['sAjaxSource'] = reverse_ajax_source(options.get('sAjaxSource'))
     return mark_safe(dumpjs(options, indent=4, sort_keys=True))
Example #11
    def export(self):
        swanpardict = SortedDict(self.get_fields())
        # clean up the common information
        del swanpardict['ID']
        del swanpardict['name']
        del swanpardict['user']
        del swanpardict['group']
        del swanpardict['time created']
        del swanpardict['last modified']

        # reconstruct COMPUTE line before deleting
        swanpardict['compute'] = "nonst %s %s hr %s" % (
            swanpardict['time start'], swanpardict['time interval'], swanpardict['time end'])
        del swanpardict['time start']
        del swanpardict['time end']
        del swanpardict['time interval']

        if (self.swanconfig):
            swanpar_dir = "%s/%s" % (MEDIA_ROOT, self.swanconfig.model_input.input_dir)
            swanpar_file = os.path.abspath('%s/%s' % (swanpar_dir, self.name))
            log.info(swanpar_file)

            if os.path.exists(swanpar_file):
                os.remove(swanpar_file)

            f = open(swanpar_file, 'w')

            for name, value in swanpardict.items():
                f.write("%s %s\n" % (name, value))
            f.write('stop')
            f.close()
        else:
            log.warn("no need to export without a model input associated with a parameter file")
Example #12
def get_language_config(content_language=None):
    language = get_language()[:2]
    if content_language:
        content_language = content_language[:2]
    else:
        content_language = language

    config = {}
    config['language'] = language

    lang_names = SortedDict()
    for lang, name in settings.LANGUAGES:
        if lang[:2] not in lang_names:
            lang_names[lang[:2]] = []
        lang_names[lang[:2]].append(_(name))
    sp_langs = []
    for lang, names in lang_names.items():
        if lang == content_language:
            default = '+'
        else:
            default = ''
        sp_langs.append(u'%s%s=%s' % (default, ' / '.join(names), lang))

    config['spellchecker_languages'] = ','.join(sp_langs)

    if content_language in settings.LANGUAGES_BIDI:
        config['directionality'] = 'rtl'
    else:
        config['directionality'] = 'ltr'

    if tinymce_settings.USE_SPELLCHECKER:
        config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')

    return config
Example #13
	def get_favored_results(self, error=5, threshhold=None):
		"""
		Calculates the set of most-favored results based on their weight. Evenly-weighted results will be grouped together and either added or excluded as a group.
		
		:param error: An arbitrary number; higher values will cause this method to be more reticent about adding new items to the favored results.
		:param threshhold: Will be passed directly into :meth:`get_weighted_results`
		
		"""
		if not hasattr(self, '_favored_results'):
			results = self.get_weighted_results(threshhold)
			
			grouped_results = SortedDict()
			
			for result in results:
				grouped_results.setdefault(result.weight, []).append(result)
			
			self._favored_results = []
			
			for value, subresults in grouped_results.items():
				cost = error * sum([(value - result.weight)**2 for result in self._favored_results])
				if value > cost:
					self._favored_results += subresults
				else:
					break
			if len(self._favored_results) == len(results):
				self._favored_results = []
		return self._favored_results
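A worked example of the cut-off rule, reading directly from the loop above: with error=5 and one result at each of the weights 10, 9, and 3, the first group costs 5·0 = 0 and 10 > 0, so it is kept; the second costs 5·(9−10)² = 5 and 9 > 5, so it is kept too; the third costs 5·((3−10)² + (3−9)²) = 425, and since 3 < 425 the loop breaks, leaving only the 10 and the 9 as the favored set.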
Example #14
 def SearchCode(self, event):  # wxGlade: ReadCodeBrowser.<event_handler>
     #get all matching codes
     search_results_header = 'All other codes found...'
     search_codes_list = list(ReadCode.objects.filter(concept_status='C'
                                             ).filter(synonyms__term_30__icontains=self.search_text.GetValue()
                                             ).distinct())
     code_id_list = [x.code for x in search_codes_list]
     final_codes = SortedDict()
     for (k,v) in self.preferred_codes:
         final_codes[k] = []
     final_codes[search_results_header] = []
     font = self.search_results.GetFont()
     font.SetWeight(wx.FONTWEIGHT_BOLD)
     for c in search_codes_list:
         # first look in the preferred codes, and add it to that list
         for (k,v) in self.preferred_codes:
             if c.code in v:
                 final_codes[k].append(c)
         #now look to see if it has any parents in the rest of the codes
         parents = set(c.get_ancestor_codes().split(','))
         parents.discard(c.code)
         for parent in parents:
             if parent in code_id_list:
                 #this code has a matching parent - do not include
                 break
         else:
             final_codes[search_results_header].append(c)
     self.search_results.Clear()
     for (k,v) in final_codes.items():
         if final_codes[k]:
             item = self.search_results.Append(k)
             self.search_results.SetItemFont(item,font)
             for f in final_codes[k]:
                 self.search_results.Append(unicode(f),f)
Example #15
 def js_options(self):
     options = deepcopy(self._meta.options)
     aoColumnDefs = options.setdefault('aoColumnDefs', [])
     colopts = SortedDict()
     for index, bcol in enumerate(self.bound_columns.values()):
         for key, value in bcol.options.items():
             if (key, str(value)) not in colopts:
                 colopts[(key, str(value))] = {}
                 colopts[(key, str(value))]['targets'] = []
             coltargets = colopts[(key, str(value))]['targets'] + [index]
             colopts[(key, str(value))]['targets'] = coltargets
             colopts[(key, str(value))]['key'] = key
             colopts[(key, str(value))]['value'] = value
         if bcol.sort_field not in self.bound_columns:
             continue
         if bcol.sort_field == bcol.display_field:
             continue
         key = 'iDataSort'
         value = self.bound_columns.keys().index(bcol.sort_field)
         if (key, str(value)) not in colopts:
             colopts[(key, str(value))] = {}
             colopts[(key, str(value))]['targets'] = []
         coltargets = colopts[(key, str(value))]['targets'] + [index]
         colopts[(key, str(value))]['targets'] = coltargets
         colopts[(key, str(value))]['key'] = key
         colopts[(key, str(value))]['value'] = value
     for kv, values in colopts.items():
         aoColumnDefs.append(dict([(values['key'], values['value']),
                                   ('aTargets', values['targets'])]))
     return mark_safe(dumpjs(options, indent=4, sort_keys=True))
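The repeated bookkeeping above exists to merge identical (option, value) pairs into a single aoColumnDefs entry with a combined aTargets list. A hedged compaction (`group_column_defs` is hypothetical; it keys on `str(value)` exactly as the original does, since option values may themselves be unhashable):

from collections import defaultdict

def group_column_defs(bound_columns):
    # Merge identical (option, value) pairs into one aTargets list.
    groups = defaultdict(lambda: {'targets': []})
    for index, bcol in enumerate(bound_columns.values()):
        for key, value in bcol.options.items():
            entry = groups[(key, str(value))]
            entry['key'], entry['value'] = key, value
            entry['targets'].append(index)
    return [{g['key']: g['value'], 'aTargets': g['targets']}
            for g in groups.values()]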
Example #16
    def get_actions(self, request, addon):
        labels, details = self._review_actions()

        actions = SortedDict()
        if not addon.admin_review or acl.action_allowed(request, "ReviewerAdminTools", "View"):
            if self.review_type != "preliminary":
                actions["public"] = {
                    "method": self.handler.process_public,
                    "minimal": False,
                    "label": _lazy("Push to public"),
                }
            actions["prelim"] = {
                "method": self.handler.process_preliminary,
                "label": labels["prelim"],
                "minimal": False,
            }
            actions["reject"] = {"method": self.handler.process_sandbox, "label": _lazy("Reject"), "minimal": False}
        actions["info"] = {
            "method": self.handler.request_information,
            "label": _lazy("Request more information"),
            "minimal": True,
        }
        actions["super"] = {
            "method": self.handler.process_super_review,
            "label": _lazy("Request super-review"),
            "minimal": True,
        }
        actions["comment"] = {"method": self.handler.process_comment, "label": _lazy("Comment"), "minimal": True}
        for k, v in actions.items():
            v["details"] = details.get(k)

        return actions
Example #17
class ZipCodeFilterSpec(FilterSpec):
    def __init__(self, request, params, model, model_admin):
        super(ZipCodeFilterSpec, self).__init__(request, params, model, model_admin)
        self.links = SortedDict((("Any", "zip_any"), ("90***", "zip_90"), ("75***", "zip_75")))

    def consumed_params(self):
        return self.links.values()

    def choices(self, cl):
        selected = [v for v in self.links.values() if v in self.params]
        for title, key in self.links.items():
            yield {
                "selected": (key in self.params or (key == "zip_any" and selected == [])),
                "query_string": cl.get_query_string({key: 1}, selected),
                "display": title,
            }

    def get_query_set(self, cl, qs):
        if "zip_90" in self.params:
            return qs.filter(locations__zip_code__startswith="90").distinct()
        if "zip_75" in self.params:
            return qs.filter(locations__zip_code__startswith="75").distinct()
        return qs

    def title(self):
        return u"Zip code"
Example #18
class CompanyOpenFilterSpec(FilterSpec):
    def __init__(self, request, params, model, model_admin):
        super(CompanyOpenFilterSpec, self).__init__(request, params, model, model_admin)
        self.links = SortedDict((("Any", "open_any"), ("Weekdays", "open_weekdays"), ("Weekends", "open_weekends")))

    def consumed_params(self):
        return self.links.values()

    def choices(self, cl):
        selected = [v for v in self.links.values() if v in self.params]
        for title, key in self.links.items():
            yield {
                "selected": (key in self.params or (key == "open_any" and selected == [])),
                "query_string": cl.get_query_string({key: 1}, selected),
                "display": title,
            }

    def get_query_set(self, cl, qs):
        if "open_weekdays" in self.params:
            return qs.filter(locations__open_days__contains="12345").distinct()
        if "open_weekends" in self.params:
            return qs.filter(locations__open_days__contains="67").distinct()
        return qs

    def title(self):
        return u"Locations open?"
Example #19
def stats(request, service_id):
    """Compute some stats.
    """

    service = get_object_or_404(UserService, pk=service_id)

    if check_is_service_id(service, PACKAGE_NAME):
        shares = service.handler.get_items(date.today() - timedelta(days=7))
        sources = {}

        # Count source websites
        for share in shares:
            if share.source in sources:
                sources[share.source] += 1
            else:
                sources[share.source] = 1

        sources = SortedDict(
            sorted(sources.items(), reverse=True, key=lambda x: x[1])
        )

        return render(
            request,
            {
                'shares' : shares,
                'sources' : sources,
            },
            'causal/googlereader/stats.html'
        )
    else:
        return redirect('/%s' % (request.user.username,))
Example #20
def get_language_config(content_language=None):
    language = get_language()[:2]
    if content_language:
        content_language = content_language[:2]
    else:
        content_language = language

    config = {}
    config["language"] = language

    lang_names = SortedDict()
    for lang, name in settings.LANGUAGES:
        if lang[:2] not in lang_names:
            lang_names[lang[:2]] = []
        lang_names[lang[:2]].append(_(name))
    sp_langs = []
    for lang, names in lang_names.items():
        if lang == content_language:
            default = "+"
        else:
            default = ""
        sp_langs.append(u"%s%s=%s" % (default, " / ".join(names), lang))

    config["spellchecker_languages"] = ",".join(sp_langs)

    if content_language in settings.LANGUAGES_BIDI:
        config["directionality"] = "rtl"
    else:
        config["directionality"] = "ltr"

    if tinymce.settings.USE_SPELLCHECKER:
        config["spellchecker_rpc_url"] = reverse("tinymce.views.spell_check")

    return config
Example #21
def dump_data(request,appname):
    app_list = SortedDict()
    
    try:
        if request.method == 'POST':
            for appname in request.POST.getlist('apps'):
                app = get_app(appname)
                app_list[app] = None
            appname = 'choices'
        else:
            app = get_app(appname)
            app_list[app] = None
    except ImproperlyConfigured:
        if appname == 'all':
            for app in get_apps():
                app_list[app] = None

    if len(app_list) > 0:
        objects = []
        for model in sort_dependencies(app_list.items()):
            if not model._meta.proxy and router.allow_syncdb(DEFAULT_DB_ALIAS, model):
                objects.extend(model._default_manager.using(DEFAULT_DB_ALIAS).all())
        serializers.get_serializer('json')
        json = serializers.serialize('json', objects, indent=2, use_natural_keys=True)
        response = HttpResponse(json, mimetype='application/json')
        response['Content-Disposition'] = 'attachment; filename=%s_%s_fixture.json' % (date.today(), appname)
        return response

    return render_to_response('diagnostic/dumpdata.html', context_instance=RequestContext(request))
Example #22
def history(request):
    """
    Tally total expenses and income for each month
    """
    history = SortedDict()
    entries = Entry.objects.all().order_by('-date')
    for e in entries:

        # Create dict key
        this_month = datetime.date(e.date.year, e.date.month, 1)
        if this_month not in history:
            history[this_month] = {'income': 0, 'expense': 0}

        # sum values for the month
        if e.category.type in ['EXP', 'COGS']:
            history[this_month]['expense'] += e.amount
        elif e.category.type == 'INC':
            history[this_month]['income'] += e.amount

    
    for date, value_dict in history.items():
        value_dict['net'] = value_dict['income'] - value_dict['expense']

    return simple.direct_to_template(request,
                                     template='beancounter/history.html',
                                     extra_context = { 'history': history })
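A hedged Python 3 rewrite of the tally (`monthly_history` is hypothetical; it assumes each entry exposes `date`, `amount`, and `category.type` exactly as above):

import datetime
from collections import defaultdict

def monthly_history(entries):
    # Bucket income/expense totals by the first day of each month, then derive the net.
    history = defaultdict(lambda: {'income': 0, 'expense': 0})
    for e in entries:
        month = datetime.date(e.date.year, e.date.month, 1)
        if e.category.type in ('EXP', 'COGS'):
            history[month]['expense'] += e.amount
        elif e.category.type == 'INC':
            history[month]['income'] += e.amount
    for totals in history.values():
        totals['net'] = totals['income'] - totals['expense']
    return dict(history)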
Example #23
 def get_context_data(self, **kwargs):
     context_data = super(Referrer, self).get_context_data(**kwargs)
     if not self.client:
         return context_data
     referrer_query = models.RequestEvent.objects.filter(
         client=self.client,
         created__range=[self.start_date, self.end_date]
     ).exclude(referrer=None).exclude(referrer='')
     for domain in self.client.domain_set.all():
         referrer_query = referrer_query.exclude(
             referrer__regex='^https?://.*%s/?.*' % domain.pattern
         )
     referrer_query = referrer_query.values('referrer').annotate(
         referrals=Count('pk')
     )
     referrer_data = defaultdict(int)
     for data in referrer_query:
         parse_result = urlparse(data['referrer'])
         path = parse_result.path or '/'
         referrer_data[
             '%s://%s%s' % (
                 parse_result.scheme, parse_result.netloc, path
             )
         ] += data['referrals']
     referrer_data = SortedDict(
         sorted(referrer_data.items(), key=lambda x: x[1], reverse=True)
     )
     context_data['referrer_data'] = referrer_data
     return context_data
Example #24
 def get_context_data(self, **kwargs):
     context_data = super(ExitPage, self).get_context_data(**kwargs)
     if not self.client:
         return context_data
     exit_page_data = defaultdict(int)
     sessions_query = models.RequestEvent.objects.values(
         'tracking_key'
     ).annotate(new_date=Min('created')).filter(
         new_date__range=[self.start_date, self.end_date],
         client=self.client
     )
     sessions = list(data['tracking_key'] for data in sessions_query)
     for exit_page_row in models.RequestEvent.objects.filter(
         tracking_key__in=sessions
     ).values('path', 'tracking_key').order_by(
         'tracking_key', '-created'
     ):
         if exit_page_row['tracking_key'] in sessions:
             exit_page_data[exit_page_row['path']] += 1
             sessions.remove(exit_page_row['tracking_key'])
     exit_page_data = SortedDict(
         sorted(exit_page_data.items(), key=lambda x: x[1], reverse=True)
     )
     context_data.update({
         'exit_page_data': exit_page_data
     })
     return context_data
Example #25
def build_kml(view, kml_type, id, kml_args_dict):
    '''builds a dynamic KML file'''
    # n.b.: kml_args_dict contains direct USER input
    # This means that we should process input as unsafe in the factory methods...

    # ensure kml_args_dict is sorted, so URLs always come out the same;
    # this helps Google Earth's caching
    kml_args_dict = SortedDict(sorted(kml_args_dict.items()))
    template_context = {
        'view': view,
        'kml_args': urllib.urlencode(kml_args_dict)
    }
    if kml_type == 'transect' and id is not None:
        transect = makejarkustransect(id)
        extra_context = build_transect_context(transect, kml_args_dict)
        template_context.update(extra_context)
    elif kml_type == 'lod':
        lod = makejarkuslod()
        extra_context = build_lod_context(lod, kml_args_dict)
        template_context.update(extra_context)
    elif kml_type == 'style':
        extra_context = build_style_context(kml_args_dict)
        template_context.update(extra_context)
    else:
        raise Exception('KML type not supported')
    return render_to_kmz("kml/{0}.kml".format(kml_type), template_context)
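The sort matters because `urlencode` preserves the order of the pairs it receives, so sorting first makes the generated URLs byte-identical across requests, which is what lets Google Earth cache them. A Python 3 illustration (`urllib.parse.urlencode` replaces the py2 `urllib.urlencode` used above):

from urllib.parse import urlencode

args = {'b': 2, 'a': 1}
# Sorted items produce a stable query string regardless of insertion order.
print(urlencode(sorted(args.items())))  # a=1&b=2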
Example #26
    def get_actions(self):
        labels, details = self._review_actions()

        actions = SortedDict()
        if self.review_type != 'preliminary':
            actions['public'] = {'method': self.handler.process_public,
                                 'minimal': False,
                                 'label': _lazy('Push to public')}

        actions['prelim'] = {'method': self.handler.process_preliminary,
                             'label': labels['prelim'],
                             'minimal': False}
        actions['reject'] = {'method': self.handler.process_sandbox,
                             'label': _lazy('Reject'),
                             'minimal': False}
        actions['info'] = {'method': self.handler.request_information,
                           'label': _lazy('Request more information'),
                           'minimal': True}
        actions['super'] = {'method': self.handler.process_super_review,
                            'label': _lazy('Request super-review'),
                            'minimal': True}
        actions['comment'] = {'method': self.handler.process_comment,
                              'label': _lazy('Comment'),
                              'minimal': True}
        for k, v in actions.items():
            v['details'] = details.get(k)

        return actions
Example #27
def make_basic_chart_json(
        values,
        chart_type='pie',
        chart_tip='#val#',
        chart_nolabels=True,
        chart_make_value=default_make_value,
        chart_params=None,
        elements_params=None,
        ):
    # Sort from biggest to smallest, dropping falsy values
    values = values.items()
    values.sort(lambda a, b: cmp(b[1], a[1]))
    values = SortedDict([(k, v) for k, v in values if v])
    
    data = {"bg_colour": "#ffffff",
            "elements": [{
              "type": chart_type,
              "tip": _(chart_tip),
              "no-labels": chart_nolabels,
              "colours": [random_color() for i in values.values()],
              "values": [chart_make_value(num, i) for num, i in enumerate(values.items())],
              "alpha": 0.7,
              }],
            }

    if chart_params:
        data.update(chart_params)

    if elements_params:
        for e in data['elements']:
            e.update(elements_params)

    return HttpResponse(simplejson.dumps(data, cls=BIEncoder), mimetype="text/javascript")
Example #28
    def get_actions(self, request, addon):
        actions = SortedDict()
        if request is None:
            # If request is not set, it means we are just (ab)using the
            # ReviewHelper for its `handler` attribute and we don't care about
            # the actions.
            return actions
        labels, details = self._review_actions()
        reviewable_because_complete = addon.status not in (
            amo.STATUS_NULL, amo.STATUS_DELETED)
        reviewable_because_admin = (
            not addon.admin_review or
            acl.action_allowed(request, 'ReviewerAdminTools', 'View'))
        reviewable_because_submission_time = (
            not is_limited_reviewer(request) or
            (addon.latest_version is not None and
                addon.latest_version.nomination is not None and
                (datetime.datetime.now() - addon.latest_version.nomination >=
                    datetime.timedelta(hours=REVIEW_LIMITED_DELAY_HOURS))))
        reviewable_because_pending = addon.latest_version is not None and (
            len(addon.latest_version.is_unreviewed) > 0 or
            addon.status == amo.STATUS_LITE_AND_NOMINATED)
        if (reviewable_because_complete and
                reviewable_because_admin and
                reviewable_because_submission_time and
                reviewable_because_pending):
            if self.review_type != 'preliminary':
                if addon.is_listed:
                    label = _lazy('Push to public')
                else:
                    label = _lazy('Grant full review')
                actions['public'] = {'method': self.handler.process_public,
                                     'minimal': False,
                                     'label': label}
            # An unlisted sideload add-on, which requests a full review, cannot
            # be granted a preliminary review.
            prelim_allowed = not waffle.flag_is_active(
                request, 'no-prelim-review') and addon.is_listed
            if prelim_allowed or self.review_type == 'preliminary':
                actions['prelim'] = {
                    'method': self.handler.process_preliminary,
                    'label': labels['prelim'],
                    'minimal': False}
            actions['reject'] = {'method': self.handler.process_sandbox,
                                 'label': _lazy('Reject'),
                                 'minimal': False}
        actions['info'] = {'method': self.handler.request_information,
                           'label': _lazy('Request more information'),
                           'minimal': True}
        actions['super'] = {'method': self.handler.process_super_review,
                            'label': _lazy('Request super-review'),
                            'minimal': True}
        actions['comment'] = {'method': self.handler.process_comment,
                              'label': _lazy('Comment'),
                              'minimal': True}
        for k, v in actions.items():
            v['details'] = details.get(k)

        return actions
Example #29
 def get_external_url(self):
     parts = urlparse.urlparse(self.external_url)
     # for avoiding IE errors we have to parse the URL query string to set wmode=transparent
     scheme, netloc, path, params, query, fragment = parts[:6]
     querydict = SortedDict(parse_qsl(query))
     querydict['wmode'] = 'transparent'
     query = '&'.join(['%s=%s' % (key, value) for (key, value) in querydict.items()])
     return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
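On Python 3 the same rewrite fits in `urllib.parse`, whose `urlencode` also percent-encodes values, something the manual `'&'.join` above skips (`force_transparent_wmode` is a hypothetical name):

from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

def force_transparent_wmode(url):
    # Rewrite the query string so embedded players get wmode=transparent.
    parts = urlparse(url)
    query = dict(parse_qsl(parts.query))
    query['wmode'] = 'transparent'
    return urlunparse(parts._replace(query=urlencode(query)))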
Example #30
 def log_header(self, *args, **kwargs):
     if not self._log_headers:
         return ''
     lhd = SortedDict([(k, v) for k, v in self.log_header_dict(*args, **kwargs).items() if v is not None])
     if not lhd:
         return ''
     header = '|'.join('%s:%s' % item for item in lhd.items())
     return '{%s} ' % (header.strip())
Example #31
    def get_actions(self):
        labels, details = self._review_actions()

        actions = SortedDict()
        if self.review_type != 'preliminary':
            actions['public'] = {
                'method': self.handler.process_public,
                'minimal': False,
                'label': _lazy('Push to public')
            }

        if not self.addon.is_premium():
            actions['prelim'] = {
                'method': self.handler.process_preliminary,
                'label': labels['prelim'],
                'minimal': False
            }

        actions['reject'] = {
            'method': self.handler.process_sandbox,
            'label': _lazy('Reject'),
            'minimal': False
        }
        actions['info'] = {
            'method': self.handler.request_information,
            'label': _lazy('Request more information'),
            'minimal': True
        }
        actions['super'] = {
            'method': self.handler.process_super_review,
            'label': _lazy('Request super-review'),
            'minimal': True
        }
        actions['comment'] = {
            'method': self.handler.process_comment,
            'label': _lazy('Comment'),
            'minimal': True
        }
        for k, v in actions.items():
            v['details'] = details.get(k)

        return actions
Example #32
    def get_fields(self):
        """
        Returns the complete set of fields for the object as a dict.

        This will be the set of any explicitly declared fields,
        plus the set of fields returned by get_default_fields().
        """
        ret = SortedDict()

        # Get the explicitly declared fields
        base_fields = copy.deepcopy(self.base_fields)
        for key, field in base_fields.items():
            ret[key] = field

        # Add in the default fields
        default_fields = self.get_default_fields()
        for key, val in default_fields.items():
            if key not in ret:
                ret[key] = val

        # If 'fields' is specified, use those fields, in that order.
        if self.opts.fields:
            assert isinstance(
                self.opts.fields,
                (list, tuple)), '`fields` must be a list or tuple'
            new = SortedDict()
            for key in self.opts.fields:
                new[key] = ret[key]
            ret = new

        # Remove anything in 'exclude'
        if self.opts.exclude:
            assert isinstance(
                self.opts.exclude,
                (list, tuple)), '`exclude` must be a list or tuple'
            for key in self.opts.exclude:
                ret.pop(key, None)

        for key, field in ret.items():
            field.initialize(parent=self, field_name=key)

        return ret
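A hedged Python 3 condensation of the `fields`/`exclude` handling (`apply_field_options` is hypothetical, with the same semantics: `fields` fixes membership and order, then `exclude` is dropped):

def apply_field_options(ret, fields=None, exclude=None):
    # Keep only the declared fields, in declared order, then drop excludes.
    if fields:
        ret = {key: ret[key] for key in fields}
    for key in (exclude or ()):
        ret.pop(key, None)
    return ret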
Example #33
def _performance_by_month(user_id, months=12, end_month=None, end_year=None):
    monthly_data = SortedDict()

    now = datetime.now()
    if not end_month:
        end_month = now.month
    if not end_year:
        end_year = now.year

    end_time = time.mktime((end_year, end_month + 1, 1, 0, 0, 0, 0, 0, -1))
    start_time = time.mktime((end_year, end_month + 1 - months,
                              1, 0, 0, 0, 0, 0, -1))

    sql = (PerformanceGraph.objects
          .filter_raw('log_activity.created >=',
                      date.fromtimestamp(start_time).isoformat())
          .filter_raw('log_activity.created <',
                      date.fromtimestamp(end_time).isoformat())
          )

    for row in sql.all():
        label = row.approval_created.isoformat()[:7]

        if label not in monthly_data:
            xaxis = row.approval_created.strftime('%b %Y')
            monthly_data[label] = dict(teamcount=0, usercount=0,
                                       teamamt=0, label=xaxis)

        monthly_data[label]['teamamt'] = monthly_data[label]['teamamt'] + 1
        monthly_data_count = monthly_data[label]['teamcount']
        monthly_data[label]['teamcount'] = monthly_data_count + row.total

        if row.user_id == user_id:
            user_count = monthly_data[label]['usercount']
            monthly_data[label]['usercount'] = user_count + row.total

    # Calculate averages
    for i, vals in monthly_data.items():
        average = round(vals['teamcount'] / float(vals['teamamt']), 1)
        monthly_data[i]['teamavg'] = str(average)  # floats aren't valid json

    return monthly_data
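The `end_month + 1` and `end_month + 1 - months` arithmetic leans on `time.mktime` normalizing out-of-range month values, so no explicit year rollover is needed. A quick check:

import datetime
import time

# Month 13 of 2015 normalizes to January 2016.
stamp = time.mktime((2015, 13, 1, 0, 0, 0, 0, 0, -1))
print(datetime.date.fromtimestamp(stamp))  # 2016-01-01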
Example #34
def profile(request, username, template="accounts/account_profile.html"):
    """
    Display a profile.
    """
    profile_user = get_object_or_404(User, username=username, is_active=True)
    profile_fields = SortedDict()
    Profile = get_profile_model()
    if Profile is not None:
        profile = profile_user.get_profile()
        user_fieldname = get_profile_user_fieldname()
        exclude = tuple(settings.ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS)
        for field in Profile._meta.fields:
            if field.name not in ("id", user_fieldname) + exclude:
                value = getattr(profile, field.name)
                profile_fields[field.verbose_name.title()] = value
    context = {
        "profile_user": profile_user,
        "profile_fields": profile_fields.items(),
    }
    return render(request, template, context)
Example #35
def profile_fields(user):
    """
    Returns profile fields as a dict for the given user. Used in the
    profile view template when the ``ACCOUNTS_PROFILE_VIEWS_ENABLED``
    setting is set to ``True``, and also in the account approval emails
    sent to administrators when the ``ACCOUNTS_APPROVAL_REQUIRED``
    setting is set to ``True``.
    """
    fields = SortedDict()
    try:
        profile = get_profile_for_user(user)
        user_fieldname = get_profile_user_fieldname()
        exclude = tuple(settings.ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS)
        for field in profile._meta.fields:
            if field.name not in ("id", user_fieldname) + exclude:
                value = getattr(profile, field.name)
                fields[field.verbose_name.title()] = value
    except ProfileNotConfigured:
        pass
    return list(fields.items())
Example #36
    def extra_context(self):
        """
        Allows the addition of more context variables as needed.

        Must return a dictionary.
        """
        documents = SortedDict()
        for r in self.results:
            if r.document_id in documents:
                documents[r.document_id]['pages'].append(r.object)
            else:
                documents[r.document_id] = {
                    'id': r.object.document.id,
                    'document': r.object.document.document,
                    'pages': [r.object]
                }

        paginator = Paginator(documents.items(), 5)
        try:
            page = self.request.GET.get('pag')
            docs = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            docs = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            docs = paginator.page(paginator.num_pages)

        cp = self.request.GET.copy()
        if 'pag' in cp:
            cp.pop('pag')

        return {
            'docs': docs,
            'total': len(documents),
            'vs_query': self.vs_query,
            'refs_fields': None,
            'url_query': cp.urlencode,
            'tags': json.dumps([tag.name for tag in Tag.objects.all()]),
        }
Example #37
def data_dump(request):
    if not request.user.is_authenticated() or not request.user.is_superuser:
        response = HttpResponse(json.dumps({"Error": "Not Authorized"}))
        response['Content-type'] = 'application/json'
        return response

    # If we wanted all apps we would dump every entry from get_apps(),
    # but here we only want campus:
    # app_list = SortedDict([(app, None) for app in get_apps()])
    app_list = SortedDict([(get_app('campus'), None)])

    # Now collate the objects to be serialized.
    objects = []

    # Sort key helper: needed because SQLite doesn't use a consistent default ordering
    def ordering(self):
        if hasattr(self, 'name'):
            return str(self.name).lower()
        elif hasattr(self, 'id'):
            return self.id
        else:
            return self.pk

    for model in sort_dependencies(app_list.items()):
        # skip groupedlocation model (not needed since Group uses natural keys)
        if model == GroupedLocation:
            continue
        # - make ordering case insensitive
        # - must also make special case for MapObj else the leaf class will be
        #   serialized, not the actual MapObj itself
        if model == MapObj:
            objects.extend(sorted(model.objects.mob_filter(), key=ordering))
        else:
            objects.extend(sorted(model.objects.all(), key=ordering))
    try:
        data = serializers.serialize('json',
                                     objects,
                                     indent=4,
                                     use_natural_keys=True)
    except Exception, e:
        data = serializers.serialize('json', "ERROR!")
Example #38
def get_language_config(content_language=None):
    """Get language config."""

    language = get_language()[:2]

    if content_language:
        content_language = content_language[:2]
    else:
        content_language = language

    config = {}
    config['language'] = language

    lang_names = SortedDict()
    for lang, name in settings.LANGUAGES:
        if lang[:2] not in lang_names:
            lang_names[lang[:2]] = []

        lang_names[lang[:2]].append(_(name))

    sp_langs = []

    for lang, names in lang_names.items():
        if lang == content_language:
            default = '+'
        else:
            default = ''
        sp_langs.append('%s%s=%s' % (default, ' / '.join(names), lang))

    config['spellchecker_languages'] = ','.join(sp_langs)

    if content_language in settings.LANGUAGES_BIDI:
        config['directionality'] = 'rtl'
    else:
        config['directionality'] = 'ltr'

    if tinymce_settings.USE_SPELLCHECKER:
        config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')

    return config
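For instance, with `LANGUAGES = [('en', 'English'), ('en-gb', 'British English'), ('fr', 'French')]` and French content (and no message catalog rewriting the names), `spellchecker_languages` comes out as `English / British English=en,+French=fr`, the leading `+` marking the default spellchecker language.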
Example #39
File: admin.py Project: jinkt/BorIS
    def __call__(self, request, tab_class=None):
        interface = self.interface_class()
        tabs = SortedDict()

        for t in interface.tabs:
            tab = t()
            if tab_class == t and request.method == 'POST':
                form = tab.form(request.POST, prefix=tab.form_prefix)
                if form.is_valid():
                    cleaned_data = form.cleaned_data
                    display_type = cleaned_data.pop('display')

                    return ReportResponse(tab.report,
                                          request,
                                          display_type,
                                          **cleaned_data)
            else:
                form = tab.form(prefix=tab.form_prefix)
            tabs[tab] = form

        ctx = {'tabs': tabs.items(), 'interface': interface, 'name': self.title}
        return render(request, 'reporting/interface.html', ctx)
Example #40
    def faceted_filters(self):
        url = QueryURLObject(self.url)
        filter_mapping = SortedDict(
            (filter_['slug'], filter_) for filter_ in self.serialized_filters)

        filter_groups = SortedDict()

        for slug, facet in self.facet_counts().items():
            if not isinstance(facet, dict):
                # let's just blankly ignore any non-filter or non-query filters
                continue

            filter_ = filter_mapping.get(slug, None)
            if filter_ is None:
                name = slug
                group = None
            else:
                # Let's check if we can get the name from the gettext catalog
                name = _(filter_['name'])
                group = _(filter_['group']['name'])

            f = Filter(url=url,
                       page=self.current_page,
                       name=name,
                       slug=slug,
                       count=facet.get('count', 0),
                       active=slug in self.topics,
                       group=group)

            filter_groups.setdefault((group, filter_['group']['order']),
                                     []).append(f)

        # return a sorted list of filters here
        grouped_filters = []
        for (group, order), filters in filter_groups.items():
            sorted_filters = sorted(filters, key=attrgetter('name'))
            grouped_filters.append(FilterGroup(group, order, sorted_filters))
        return sorted(grouped_filters, key=attrgetter('order'), reverse=True)
Example #41
    def get_actions(self, request, addon):
        labels, details = self._review_actions()

        actions = SortedDict()
        if not addon.admin_review or acl.action_allowed(
                request, 'ReviewerAdminTools', 'View'):
            if self.review_type != 'preliminary':
                if addon.is_listed:
                    label = _lazy('Push to public')
                else:
                    label = _lazy('Grant full review')
                actions['public'] = {'method': self.handler.process_public,
                                     'minimal': False,
                                     'label': label}
            # An unlisted sideload add-on, which requests a full review, cannot
            # be granted a preliminary review.
            if addon.is_listed or self.review_type == 'preliminary':
                actions['prelim'] = {
                    'method': self.handler.process_preliminary,
                    'label': labels['prelim'],
                    'minimal': False}
            actions['reject'] = {'method': self.handler.process_sandbox,
                                 'label': _lazy('Reject'),
                                 'minimal': False}
        actions['info'] = {'method': self.handler.request_information,
                           'label': _lazy('Request more information'),
                           'minimal': True}
        actions['super'] = {'method': self.handler.process_super_review,
                            'label': _lazy('Request super-review'),
                            'minimal': True}
        actions['comment'] = {'method': self.handler.process_comment,
                              'label': _lazy('Comment'),
                              'minimal': True}
        for k, v in actions.items():
            v['details'] = details.get(k)

        return actions
Example #42
def dump_data(request, appname):
    app_list = SortedDict()

    try:
        if request.method == 'POST':
            for appname in request.POST.getlist('apps'):
                app = get_app(appname)
                app_list[app] = None
            appname = 'choices'
        else:
            app = get_app(appname)
            app_list[app] = None
    except ImproperlyConfigured:
        if appname == 'all':
            for app in get_apps():
                app_list[app] = None

    if (len(app_list) > 0):
        objects = []
        for model in sort_dependencies(app_list.items()):
            if not model._meta.proxy and router.allow_syncdb(
                    DEFAULT_DB_ALIAS, model):
                objects.extend(
                    model._default_manager.using(DEFAULT_DB_ALIAS).all())
        serializers.get_serializer('json')
        json = serializers.serialize('json',
                                     objects,
                                     indent=2,
                                     use_natural_keys=True)
        response = HttpResponse(json, mimetype='application/json')
        response[
            'Content-Disposition'] = 'attachment; filename=%s_%s_fixture.json' % (
                date.today().__str__(), appname)
        return response

    return render_to_response('diagnostic/dumpdata.html',
                              context_instance=RequestContext(request))
Example #43
    def export(self):
        swanpardict = SortedDict(self.get_fields())
        # clean up the common information
        del swanpardict['ID']
        del swanpardict['name']
        del swanpardict['user']
        del swanpardict['group']
        del swanpardict['time created']
        del swanpardict['last modified']

        # reconstruct COMPUTE line before deleting
        swanpardict['compute'] = "nonst %s %s hr %s" % (
            swanpardict['time start'], swanpardict['time interval'],
            swanpardict['time end'])
        del swanpardict['time start']
        del swanpardict['time end']
        del swanpardict['time interval']

        if (self.swanconfig):
            swanpar_dir = "%s/%s" % (MEDIA_ROOT,
                                     self.swanconfig.model_input.input_dir)
            swanpar_file = os.path.abspath('%s/%s' % (swanpar_dir, self.name))
            log.info(swanpar_file)

            if os.path.exists(swanpar_file):
                os.remove(swanpar_file)

            f = open(swanpar_file, 'w')

            for name, value in swanpardict.items():
                f.write("%s %s\n" % (name, value))
            f.write('stop')
            f.close()
        else:
            log.warn(
                "no need to export without a model input associated with a parameter file"
            )
Example #44
class SMSChoicesNumEnum(ChoicesNumEnum):
    def __init__(self, *items):
        """
        Receives items with four values each: key, label, i, and sender state choices
        """

        super(SMSChoicesNumEnum,
              self).__init__(*((key, label, i) for key, label, i, _ in items))
        self.sender_enum = SortedDict()
        for (key, label, i, sender_choices) in items:
            for j, choice_label in sender_choices:
                self.sender_enum[j] = (choice_label, i)

    @property
    def sender_choices(self):
        return [(val, choice[0]) for val, choice in self.sender_enum.items()]

    def get_value_from_sender_value(self, sender_val):
        """
        Return value according to sender_val
        """

        return self.sender_enum.get(
            sender_val)[1] if sender_val in self.sender_enum else self.ERROR
Example #45
    def get_actions(self, request, addon):
        actions = SortedDict()
        if request is None:
            # If request is not set, it means we are just (ab)using the
            # ReviewHelper for its `handler` attribute and we don't care about
            # the actions.
            return actions
        labels, details = self._review_actions()
        reviewable_because_complete = addon.status not in (amo.STATUS_NULL,
                                                           amo.STATUS_DELETED)
        reviewable_because_admin = (not addon.admin_review
                                    or acl.action_allowed(
                                        request, 'ReviewerAdminTools', 'View'))
        reviewable_because_submission_time = (
            not is_limited_reviewer(request)
            or (addon.latest_version is not None
                and addon.latest_version.nomination is not None and
                (datetime.datetime.now() - addon.latest_version.nomination >=
                 datetime.timedelta(hours=REVIEW_LIMITED_DELAY_HOURS))))
        reviewable_because_pending = addon.latest_version is not None and len(
            addon.latest_version.is_unreviewed) > 0
        if (reviewable_because_complete and reviewable_because_admin
                and reviewable_because_submission_time
                and reviewable_because_pending):
            if self.review_type != 'preliminary':
                if addon.is_listed:
                    label = _lazy('Push to public')
                else:
                    label = _lazy('Grant full review')
                actions['public'] = {
                    'method': self.handler.process_public,
                    'minimal': False,
                    'label': label
                }
            # An unlisted sideload add-on, which requests a full review, cannot
            # be granted a preliminary review.
            if addon.is_listed or self.review_type == 'preliminary':
                actions['prelim'] = {
                    'method': self.handler.process_preliminary,
                    'label': labels['prelim'],
                    'minimal': False
                }
            actions['reject'] = {
                'method': self.handler.process_sandbox,
                'label': _lazy('Reject'),
                'minimal': False
            }
        actions['info'] = {
            'method': self.handler.request_information,
            'label': _lazy('Request more information'),
            'minimal': True
        }
        actions['super'] = {
            'method': self.handler.process_super_review,
            'label': _lazy('Request super-review'),
            'minimal': True
        }
        actions['comment'] = {
            'method': self.handler.process_comment,
            'label': _lazy('Comment'),
            'minimal': True
        }
        for k, v in actions.items():
            v['details'] = details.get(k)

        return actions
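
A hedged sketch of how a caller might consume the returned SortedDict; 'helper', 'request', 'addon' and 'render_button' are assumptions, and the insertion order is why a SortedDict is used (the public/prelim/reject actions render before the always-available ones):

actions = helper.get_actions(request, addon)  # 'helper' is an assumed ReviewHelper instance
for key, action in actions.items():
    # 'method' and 'details' are also set per the loop above
    render_button(key, action['label'], minimal=action['minimal'])  # hypothetical renderer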
Ejemplo n.º 46
0
class TableTab(Tab):
    """
    A :class:`~horizon.tabs.Tab` class which knows how to deal with
    :class:`~horizon.tables.DataTable` classes rendered inside of it.

    This distinct class is required due to the complexity involved in handling
    dynamic tab loading, dynamic table updating and table actions all
    within one view.

    .. attribute:: table_classes

        An iterable containing the :class:`~horizon.tables.DataTable` classes
        which this tab will contain. Equivalent to the
        :attr:`~horizon.tables.MultiTableView.table_classes` attribute on
        :class:`~horizon.tables.MultiTableView`. For each table class you
        need to define a corresponding ``get_{{ table_name }}_data`` method
        as with :class:`~horizon.tables.MultiTableView`.
    """
    table_classes = None

    def __init__(self, tab_group, request):
        super(TableTab, self).__init__(tab_group, request)
        if not self.table_classes:
            class_name = self.__class__.__name__
            raise NotImplementedError("You must define a table_class "
                                      "attribute on %s" % class_name)
        # Instantiate our table classes but don't assign data yet
        table_instances = [(table._meta.name, table(request,
                                                    **tab_group.kwargs))
                           for table in self.table_classes]
        self._tables = SortedDict(table_instances)
        self._table_data_loaded = False

    def load_table_data(self):
        """
        Calls the ``get_{{ table_name }}_data`` methods for each table class
        and sets the data on the tables.
        """
        # We only want the data to be loaded once, so we track if we have...
        if not self._table_data_loaded:
            for table_name, table in self._tables.items():
                # Fetch the data function.
                func_name = "get_%s_data" % table_name
                data_func = getattr(self, func_name, None)
                if data_func is None:
                    cls_name = self.__class__.__name__
                    raise NotImplementedError("You must define a %s method "
                                              "on %s." % (func_name, cls_name))
                # Load the data.
                table.data = data_func()
                table._meta.has_more_data = self.has_more_data(table)
            # Mark our data as loaded so we don't run the loaders again.
            self._table_data_loaded = True

    def get_context_data(self, request):
        """
        Adds a ``{{ table_name }}_table`` item to the context for each table
        in the :attr:`~horizon.tabs.TableTab.table_classes` attribute.

        If only one table class is provided, a shortcut ``table`` context
        variable is also added containing the single table.
        """
        context = {}
        # If the data hasn't been manually loaded before now,
        # make certain it's loaded before setting the context.
        self.load_table_data()
        for table_name, table in self._tables.items():
            # If there's only one table class, add a shortcut name as well.
            if len(self.table_classes) == 1:
                context["table"] = table
            context["%s_table" % table_name] = table
        return context

    def has_more_data(self, table):
        return False
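
A minimal subclass sketch following the contract in the docstrings; the table class, API call and the table name "instances" are assumptions:

from horizon import tabs
from mydashboard import api                    # hypothetical API module
from mydashboard.tables import InstancesTable  # hypothetical DataTable


class InstancesTab(tabs.TableTab):
    name = "Instances"
    slug = "instances"
    table_classes = (InstancesTable,)
    template_name = "horizon/common/_detail_table.html"

    def get_instances_data(self):
        # Must be named "get_%s_data" % table._meta.name; the table's
        # name is assumed to be "instances" here.
        return api.server_list(self.request)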
Ejemplo n.º 47
0
    def handle(self, *app_labels, **options):
        from django.db.models import get_app, get_apps, get_models, get_model

        format = options.get('format', 'json')
        indent = options.get('indent', None)
        using = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[using]
        excludes = options.get('exclude', [])
        show_traceback = options.get('traceback', False)
        use_natural_keys = options.get('use_natural_keys', False)

        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                app_label, model_name = exclude.split('.', 1)
                model_obj = get_model(app_label, model_name)
                if not model_obj:
                    raise CommandError('Unknown model in excludes: %s' %
                                       exclude)
                excluded_models.add(model_obj)
            else:
                try:
                    app_obj = get_app(exclude)
                    excluded_apps.add(app_obj)
                except ImproperlyConfigured:
                    raise CommandError('Unknown app in excludes: %s' % exclude)

        if len(app_labels) == 0:
            app_list = SortedDict(
                (app, None) for app in get_apps() if app not in excluded_apps)
        else:
            app_list = SortedDict()
            for label in app_labels:
                try:
                    app_label, model_label = label.split('.')
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" %
                                           app_label)
                    if app in excluded_apps:
                        continue
                    model = get_model(app_label, model_label)
                    if model is None:
                        raise CommandError("Unknown model: %s.%s" %
                                           (app_label, model_label))

                    if app in app_list:
                        if app_list[app] and model not in app_list[app]:
                            app_list[app].append(model)
                    else:
                        app_list[app] = [model]
                except ValueError:
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" %
                                           app_label)
                    if app in excluded_apps:
                        continue
                    app_list[app] = None

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            raise CommandError("Unknown serialization format: %s" % format)

        try:
            serializers.get_serializer(format)
        except KeyError:
            raise CommandError("Unknown serialization format: %s" % format)

        # Now collate the objects to be serialized.
        objects = []
        for model in sort_dependencies(app_list.items()):
            if model in excluded_models:
                continue
            if not model._meta.proxy and router.allow_syncdb(using, model):
                objects.extend(model._default_manager.using(using).all())

        try:
            return serializers.serialize(format,
                                         objects,
                                         indent=indent,
                                         use_natural_keys=use_natural_keys)
        except Exception, e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
Ejemplo n.º 48
0
class Collector(object):
    def __init__(self, using):
        self.using = using
        # Initially, {model: set([instances])}, later values become lists.
        self.data = {}
        self.field_updates = {}  # {model: {(field, value): set([instances])}}
        # fast_deletes is a list of queryset-likes that can be deleted without
        # fetching the objects into memory.
        self.fast_deletes = []

        # Tracks deletion-order dependency for databases without transactions
        # or ability to defer constraint checks. Only concrete model classes
        # should be included, as the dependencies exist only between actual
        # database tables; proxy models are represented here by their concrete
        # parent.
        self.dependencies = {}  # {model: set([models])}

    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted.  If the call is
        the result of a cascade, 'source' should be the model that caused it,
        and 'nullable' should be set to True if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, set())
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(source._meta.concrete_model,
                                         set()).add(model._meta.concrete_model)
        return new_objs

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogeneous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(model, {}).setdefault((field, value),
                                                            set()).update(objs)

    def can_fast_delete(self, objs, from_field=None):
        """
        Determines if the objects in the given queryset-like can be
        fast-deleted. This can be done if there are no cascades, no
        parents and no signal listeners for the object class.

        The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Allows also
        skipping parent -> child -> parent chain preventing fast delete of
        the child.
        """
        if from_field and from_field.rel.on_delete is not CASCADE:
            return False
        if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
            return False
        model = objs.model
        if (signals.pre_delete.has_listeners(model)
                or signals.post_delete.has_listeners(model)
                or signals.m2m_changed.has_listeners(model)):
            return False
        # The use of from_field comes from the need to avoid cascade back to
        # parent when parent delete is cascading to child.
        opts = model._meta
        if any(link != from_field
               for link in opts.concrete_model._meta.parents.values()):
            return False
        # Foreign keys pointing to this model, both from m2m and other
        # models.
        for related in opts.get_all_related_objects(include_hidden=True,
                                                    include_proxy_eq=True):
            if related.field.rel.on_delete is not DO_NOTHING:
                return False
        # GFK deletes
        for relation in opts.many_to_many:
            if not relation.rel.through:
                return False
        return True

    def collect(self,
                objs,
                source=None,
                nullable=False,
                collect_related=True,
                source_attr=None,
                reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogeneous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.

        If 'reverse_dependency' is True, 'source' will be deleted before the
        current model, rather than after. (Needed for cascading to parent
        models, the one case in which the cascade follows the forwards
        direction of an FK rather than the reverse direction.)
        """
        if self.can_fast_delete(objs):
            self.fast_deletes.append(objs)
            return
        new_objs = self.add(objs,
                            source,
                            nullable,
                            reverse_dependency=reverse_dependency)
        if not new_objs:
            return

        model = new_objs[0].__class__

        # Recursively collect concrete model's parent models, but not their
        # related objects. These will be found by meta.get_all_related_objects()
        concrete_model = model._meta.concrete_model
        for ptr in six.itervalues(concrete_model._meta.parents):
            if ptr:
                # FIXME: This seems to be buggy and execute a query for each
                # parent object fetch. We have the parent data in the obj,
                # but we don't have a nice way to turn that data into parent
                # object instance.
                parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                self.collect(parent_objs,
                             source=model,
                             source_attr=ptr.rel.related_name,
                             collect_related=False,
                             reverse_dependency=True)

        if collect_related:
            for related in model._meta.get_all_related_objects(
                    include_hidden=True, include_proxy_eq=True):
                field = related.field
                if field.rel.on_delete == DO_NOTHING:
                    continue
                sub_objs = self.related_objects(related, new_objs)
                if self.can_fast_delete(sub_objs, from_field=field):
                    self.fast_deletes.append(sub_objs)
                elif sub_objs:
                    field.rel.on_delete(self, field, sub_objs, self.using)

            # TODO This entire block is only needed as a special case to
            # support cascade-deletes for GenericRelation. It should be
            # removed/fixed when the ORM gains a proper abstraction for virtual
            # or composite fields, and GFKs are reworked to fit into that.
            for relation in model._meta.many_to_many:
                if not relation.rel.through:
                    sub_objs = relation.bulk_related_objects(
                        new_objs, self.using)
                    self.collect(sub_objs,
                                 source=model,
                                 source_attr=relation.rel.related_name,
                                 nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation ``related``.

        """
        return related.model._base_manager.using(
            self.using).filter(**{"%s__in" % related.field.name: objs})

    def instances_with_model(self):
        for model, instances in six.iteritems(self.data):
            for obj in instances:
                yield model, obj

    def sort(self):
        sorted_models = []
        concrete_models = set()
        models = list(self.data)
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(
                    model._meta.concrete_model)
                if not (dependencies
                        and dependencies.difference(concrete_models)):
                    sorted_models.append(model)
                    concrete_models.add(model._meta.concrete_model)
                    found = True
            if not found:
                return
        self.data = SortedDict([(model, self.data[model])
                                for model in sorted_models])

    @force_managed
    def delete(self):
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()

        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(sender=model,
                                        instance=obj,
                                        using=self.using)

        # fast deletes
        for qs in self.fast_deletes:
            qs._raw_delete(using=self.using)

        # update fields
        for model, instances_for_fieldvalues in six.iteritems(
                self.field_updates):
            query = sql.UpdateQuery(model)
            for (field,
                 value), instances in six.iteritems(instances_for_fieldvalues):
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)

        # reverse instance collections
        for instances in six.itervalues(self.data):
            instances.reverse()

        # delete instances
        for model, instances in six.iteritems(self.data):
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)

        # send post_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.post_delete.send(sender=model,
                                         instance=obj,
                                         using=self.using)

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(
                self.field_updates):
            for (field,
                 value), instances in six.iteritems(instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
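
This is the deletion collector from django.db.models.deletion; a hedged sketch of how QuerySet.delete() drives it (SomeModel is hypothetical):

from django.db.models.deletion import Collector

collector = Collector(using='default')
collector.collect(SomeModel.objects.filter(pk__in=[1, 2]))  # SomeModel is hypothetical
collector.delete()  # pre_delete signals, fast deletes, field updates, row deletes, post_delete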
Ejemplo n.º 49
0
    def handle_noargs(self, **options):
        db = options.get('database')
        connection = connections[db]
        cursor = connection.cursor()
        is_postgres = 'postgres' in settings.DATABASES[db]['ENGINE']
        DB_SETTINGS = POSTGRESQL_SETTINGS if is_postgres else MYSQL_SETTINGS

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()

        # Build the manifest of apps and models that are to be synchronized
        all_models = [(app.__name__.split('.')[-2], [
            m for m in models.get_models(app, include_auto_created=True)
            if router.allow_syncdb(db, m)
        ]) for app in models.get_apps()]

        def model_installed(model):
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                        (opts.auto_created and converter(
                            opts.auto_created._meta.db_table) in tables))

        manifest = SortedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models)

        new_models = []
        for each in manifest.values():
            new_models += each
        if len(new_models) > 0:
            print colorize('[ERROR] Migration is needed')
            print 'Unregistered models:'
            for each in manifest.items():
                for model in each[1]:
                    print ' %s' % pretty_name(model)
            exit(1)

        table_info = []
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        for model in seen_models:
            table = model._meta.db_table
            columns = [field.column for field in model._meta.fields]
            types = {}
            nullables = {}
            for field in model._meta.fields:
                types[field.column] = field.db_type(connection=connection)
                nullables[field.column] = field.null

            # Multi-table inheritance: parent fields live in the parent's
            # table, so drop them from this model's column list.
            parents = model._meta.parents
            for parent in parents:
                for field in parent._meta.fields:
                    columns.remove(field.column)
                    del types[field.column]

            table_info.append((table, columns, types, nullables))

        for model in seen_models:
            for field in model._meta.local_many_to_many:
                if hasattr(field, 'creates_table') and not field.creates_table:
                    continue
                table = field.m2m_db_table()
                columns = ['id']  # They always have an id column
                types = {}
                nullables = {}
                types['id'] = 'integer'
                nullables['id'] = False
                columns.append(field.m2m_column_name())
                columns.append(field.m2m_reverse_name())
                types[field.m2m_column_name()] = 'integer'
                types[field.m2m_reverse_name()] = 'integer'
                nullables[field.m2m_column_name()] = False
                nullables[field.m2m_reverse_name()] = False
                table_info.append((table, columns, types, nullables))

        out_of_sync = []
        for app_name, model_list in all_models:
            for model in model_list:
                opts = model._meta
                converter = connection.introspection.table_name_converter
                table_name = converter(opts.db_table)
                for item in table_info:
                    if item[0] == table_name:
                        cursor.execute(DB_SETTINGS[TYPES_REQUEST] % table_name)
                        content = cursor.fetchall()
                        actual_columns = [column[0] for column in content]
                        if set(item[1]) != set(actual_columns):
                            print colorize(
                                '[ERROR] Model fields are out of sync: %s' %
                                pretty_name(model),
                                bold=False)
                            print ' Model fields:       %s' % pretty_list(
                                item[1])
                            print ' Database columns:   %s' % pretty_list(
                                actual_columns)
                            out_of_sync += [model]
                            break
                        for djcolname, djcoltype in item[2].items():
                            dbtypebase = None
                            for coltype in DB_SETTINGS[MAP]:
                                if coltype in djcoltype:
                                    dbtypebase = DB_SETTINGS[MAP][coltype]
                                    break
                            if not dbtypebase:
                                m = varchar_exp.search(djcoltype)
                                if m:
                                    dbtypebase = VARCHAR
                                    varchar_length = int(m.group(1))
                            if not dbtypebase:
                                print colorize(
                                    '[ERROR] Can\'t validate DB. Unknown field type in %s.%s: %s'
                                    %
                                    (pretty_name(model), djcolname, djcoltype))
                                exit(1)
                            for dbcolname, dbcoltype, is_nullable in content:
                                if dbcolname == djcolname:
                                    if dbtypebase not in dbcoltype:
                                        print colorize(
                                            '[ERROR] Inconsistent field type in model \'%s\''
                                            % pretty_name(model),
                                            bold=False)
                                        print ' Model field:       %s %s' % (
                                            djcolname, djcoltype)
                                        print ' Database column:   %s %s' % (
                                            dbcolname, dbcoltype)
                                        out_of_sync += [model]
                                        break
                                    if dbtypebase == VARCHAR:
                                        m = varchar_exp.search(dbcoltype)
                                        if not m or varchar_length != int(
                                                m.group(1)):
                                            print colorize(
                                                '[ERROR] Inconsistent varchar length in model \'%s\''
                                                % pretty_name(model),
                                                bold=False)
                                            print ' Model field:       %s %d' % (
                                                djcolname, varchar_length)
                                            print ' Database column:   %s %d' % (
                                                dbcolname, int(m.group(1)))
                                            out_of_sync += [model]
                                        break
                                    dj_is_nullable = item[3][djcolname]
                                    if dj_is_nullable != yesNoToBool(
                                            is_nullable):
                                        print colorize(
                                            '[ERROR] Inconsistent Nullable in model \'%s\''
                                            % pretty_name(model),
                                            bold=False)
                                        print ' Field: %s' % djcolname
                                        print ' Model NULL: %s (%s)' % (
                                            boolToYesNo(dj_is_nullable),
                                            dj_is_nullable)
                                        print ' Database NULL: %s (%s)' % (
                                            is_nullable,
                                            yesNoToBool(is_nullable))
                                        out_of_sync += [model]
                                        break
                        break
                else:
                    print colorize(
                        '[ERROR] Can\'t validate DB. There must be an error in the script!'
                    )
                    exit(1)
        if out_of_sync:
            print colorize('[ERROR] Migration is needed')
            print ' Model fields are out of sync:'
            for each in set(out_of_sync):
                print ' %s' % pretty_name(each)
            exit(1)

        print colorize('[OK] Migration checks passed', bold=False, green=True)
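
As a handle_noargs command it takes no positional arguments; a hedged invocation sketch, where the command name is an assumption and only the database option is read by the handler:

from django.core.management import call_command

call_command('check_db_sync', database='default')  # command name is hypothetical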
Ejemplo n.º 50
0
class UnifiedIndex(object):
    # Used to collect all the indexes into a cohesive whole.
    def __init__(self, excluded_indexes=None):
        self._indexes = {}
        self.fields = SortedDict()
        self._built = False
        self.excluded_indexes = excluded_indexes or []
        self.excluded_indexes_ids = {}
        self.document_field = getattr(settings, 'HAYSTACK_DOCUMENT_FIELD',
                                      'text')
        self._fieldnames = {}
        self._facet_fieldnames = {}

    @property
    def indexes(self):
        warnings.warn(
            "'UnifiedIndex.indexes' was deprecated in Haystack v2.3.0. Please use UnifiedIndex.get_indexes()."
        )
        return self._indexes

    def collect_indexes(self):
        indexes = []

        for app_mod in haystack_get_app_modules():
            try:
                search_index_module = importlib.import_module(
                    "%s.search_indexes" % app_mod.__name__)
            except ImportError:
                if module_has_submodule(app_mod, 'search_indexes'):
                    raise

                continue

            for item_name, item in inspect.getmembers(search_index_module,
                                                      inspect.isclass):
                if getattr(item, 'haystack_use_for_indexing',
                           False) and getattr(item, 'get_model', None):
                    # We've got an index. Check if we should be ignoring it.
                    class_path = "%s.search_indexes.%s" % (app_mod.__name__,
                                                           item_name)

                    if class_path in self.excluded_indexes or self.excluded_indexes_ids.get(
                            item_name) == id(item):
                        self.excluded_indexes_ids[str(item_name)] = id(item)
                        continue

                    indexes.append(item())

        return indexes

    def reset(self):
        self._indexes = {}
        self.fields = SortedDict()
        self._built = False
        self._fieldnames = {}
        self._facet_fieldnames = {}

    def build(self, indexes=None):
        self.reset()

        if indexes is None:
            indexes = self.collect_indexes()

        for index in indexes:
            model = index.get_model()

            if model in self._indexes:
                raise ImproperlyConfigured(
                    "Model '%s' has more than one 'SearchIndex`` handling it. "
                    "Please exclude either '%s' or '%s' using the 'EXCLUDED_INDEXES' "
                    "setting defined in 'settings.HAYSTACK_CONNECTIONS'." %
                    (model, self._indexes[model], index))

            self._indexes[model] = index
            self.collect_fields(index)

        self._built = True

    def collect_fields(self, index):
        for fieldname, field_object in index.fields.items():
            if field_object.document is True:
                if field_object.index_fieldname != self.document_field:
                    raise SearchFieldError(
                        "All 'SearchIndex' classes must use the same '%s' fieldname for the 'document=True' field. Offending index is '%s'."
                        % (self.document_field, index))

            # Stow the index_fieldname so we don't have to get it the hard way again.
            if fieldname in self._fieldnames and field_object.index_fieldname != self._fieldnames[
                    fieldname]:
                # We've already seen this field in the list. Raise an exception if index_fieldname differs.
                raise SearchFieldError(
                    "All uses of the '%s' field need to use the same 'index_fieldname' attribute."
                    % fieldname)

            self._fieldnames[fieldname] = field_object.index_fieldname

            # Stow the facet_fieldname so we don't have to look that up either.
            if hasattr(field_object, 'facet_for'):
                if field_object.facet_for:
                    self._facet_fieldnames[field_object.facet_for] = fieldname
                else:
                    self._facet_fieldnames[
                        field_object.instance_name] = fieldname

            # Copy the field in so we've got a unified schema.
            if field_object.index_fieldname not in self.fields:
                self.fields[field_object.index_fieldname] = copy.copy(
                    field_object)
            else:
                # If the field types are different, we can mostly
                # safely ignore this. The exception is ``MultiValueField``,
                # in which case we'll use it instead, copying over the
                # values.
                if field_object.is_multivalued:
                    old_field = self.fields[field_object.index_fieldname]
                    self.fields[field_object.index_fieldname] = copy.copy(
                        field_object)

                    # Switch it so we don't have to dupe the remaining
                    # checks.
                    field_object = old_field

                # We've already got this field in the list. Ensure that
                # what we hand back is a superset of all options that
                # affect the schema.
                if field_object.indexed is True:
                    self.fields[field_object.index_fieldname].indexed = True

                if field_object.stored is True:
                    self.fields[field_object.index_fieldname].stored = True

                if field_object.faceted is True:
                    self.fields[field_object.index_fieldname].faceted = True

                if field_object.use_template is True:
                    self.fields[
                        field_object.index_fieldname].use_template = True

                if field_object.null is True:
                    self.fields[field_object.index_fieldname].null = True

    def get_indexes(self):
        if not self._built:
            self.build()

        return self._indexes

    def get_indexed_models(self):
        # Ensuring a list here since Python3 will give us an iterator
        return list(self.get_indexes().keys())

    def get_index_fieldname(self, field):
        if not self._built:
            self.build()

        return self._fieldnames.get(field) or field

    def get_index(self, model_klass):

        indexes = self.get_indexes()

        if model_klass not in indexes:
            raise NotHandled('The model %s is not registered' % model_klass)

        return indexes[model_klass]

    def get_facet_fieldname(self, field):
        if not self._built:
            self.build()

        for fieldname, field_object in self.fields.items():
            if fieldname != field:
                continue

            if hasattr(field_object, 'facet_for'):
                if field_object.facet_for:
                    return field_object.facet_for
                else:
                    return field_object.instance_name
            else:
                return self._facet_fieldnames.get(field) or field

        return field

    def all_searchfields(self):
        if not self._built:
            self.build()

        return self.fields
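
A hedged sketch of the UnifiedIndex lifecycle as a Haystack connection would use it; Note is a hypothetical indexed model:

ui = UnifiedIndex()
ui.build()                        # imports every app's search_indexes module
ui.get_indexed_models()           # e.g. [Note], for a registered NoteIndex
index = ui.get_index(Note)        # raises NotHandled if Note is unregistered
ui.get_index_fieldname('author')  # falls back to 'author' if never remapped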
Ejemplo n.º 51
0
class SortedDictTests(SimpleTestCase):
    def setUp(self):
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'

        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'

    def test_basic_methods(self):
        self.assertEqual(self.d1.keys(), [7, 1, 9])
        self.assertEqual(self.d1.values(), ['seven', 'one', 'nine'])
        self.assertEqual(self.d1.items(), [(7, 'seven'), (1, 'one'),
                                           (9, 'nine')])

    def test_overwrite_ordering(self):
        """ Overwriting an item keeps it's place. """
        self.d1[1] = 'ONE'
        self.assertEqual(self.d1.values(), ['seven', 'ONE', 'nine'])

    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(self.d1.keys(), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(self.d2.keys(), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_change_keys(self):
        """
        Changing the list returned by keys() won't change the dict;
        it's only a copy of the key order.
        """
        k = self.d2.keys()
        k.remove(9)
        self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with a duplicate key will keep only the
        first key's position.

        A real dict will actually take the second value, so we will too,
        but we'll keep the ordering from the first occurrence of the key.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)

        self.assertEqual(d.keys(), [2, 1])

        real_dict = dict(tuples)
        self.assertEqual(sorted(real_dict.values()), ['one', 'second-two'])

        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(d.values(), ['second-two', 'one'])

    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        self.assertEqual(self.d1.keys(), self.d1.copy().keys())

    def test_append(self):
        self.d1[13] = 'thirteen'
        self.assertEqual(repr(self.d1),
                         "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}")

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')

        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        d = SortedDict((i, i) for i in xrange(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        self.assertEqual(pickle.loads(pickle.dumps(self.d1, 2)), {
            7: 'seven',
            1: 'one',
            9: 'nine'
        })

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        self.assertEqual(self.d1.keyOrder, [])
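
The behaviour under test, condensed into a short sketch (SortedDict preserves insertion order, unlike the plain dict of that era):

from django.utils.datastructures import SortedDict

d = SortedDict()
d[7] = 'seven'
d[1] = 'one'
d[9] = 'nine'
d.keys()      # [7, 1, 9] -- insertion order, not key order
d[1] = 'ONE'  # overwriting keeps the key's position
del d[7]
d[7] = 'again'
d.keys()      # [1, 9, 7] -- deleting and re-adding moves a key to the end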
Ejemplo n.º 52
0
    def get_updated_questions_for_user(self, user):
        """
        Retrieve relevant question updates for the user
        according to their subscriptions and recorded question
        views.
        """

        user_feeds = EmailFeedSetting.objects.filter(subscriber=user).exclude(
            frequency__in=('n', 'i'))

        should_proceed = False
        for feed in user_feeds:
            if feed.should_send_now():
                should_proceed = True
                break

        #shortcircuit - if there is no ripe feed to work on for this user
        if not should_proceed:
            return {}

        #these are placeholders for separate query sets per question group
        #there are four groups - one for each EmailFeedSetting.feed_type
        #and each group has subtypes A and B
        #that's because of the strange thing commented below
        #see note on Q and F objects marked with todo tag
        q_sel_A = None
        q_sel_B = None

        q_ask_A = None
        q_ask_B = None

        q_ans_A = None
        q_ans_B = None

        q_all_A = None
        q_all_B = None

        #base question query set for this user
        #basic things - not deleted, not closed, not too old
        #not last edited by the same user
        base_qs = Post.objects.get_questions(
        ).exclude(thread__last_activity_by=user).exclude(
            thread__last_activity_at__lt=user.date_joined  #exclude old stuff
        ).exclude(deleted=True).exclude(
            thread__closed=True).order_by('-thread__last_activity_at')

        if askbot_settings.ENABLE_CONTENT_MODERATION:
            base_qs = base_qs.filter(approved=True)
        #todo: for some reason filter on did not work as expected ~Q(viewed__who=user) |
        #      Q(viewed__who=user,viewed__when__lt=F('thread__last_activity_at'))
        #returns way more questions than you might think it should
        #so because of that I've created separate query sets Q_set2 and Q_set3
        #plus two separate queries run faster!

        #build the two query sets described above

        #questions that are not seen by the user at all
        not_seen_qs = base_qs.filter(~Q(viewed__who=user))
        #questions that were seen, but before last modification
        seen_before_last_mod_qs = base_qs.filter(
            Q(viewed__who=user,
              viewed__when__lt=F('thread__last_activity_at')))

        #shorten variables for convenience
        Q_set_A = not_seen_qs
        Q_set_B = seen_before_last_mod_qs

        if getattr(django_settings, 'ASKBOT_MULTILINGUAL', False):
            languages = user.languages.split()
        else:
            languages = None

        for feed in user_feeds:
            if feed.feed_type == 'm_and_c':
                #alerts on mentions and comments are processed separately
                #because comments to questions do not trigger change of last_updated
                #this may be changed in the future though, see
                #http://askbot.org/en/question/96/
                continue

            #each group of updates represented by the corresponding
            #query set has its own cutoff time
            #that cutoff time is computed for each user individually
            #and stored as a parameter "cutoff_time"

            #we won't send email for a given question if an email has been
            #sent after that cutoff_time
            if feed.should_send_now():
                if not DEBUG_THIS_COMMAND:
                    feed.mark_reported_now()
                cutoff_time = feed.get_previous_report_cutoff_time()

                if feed.feed_type == 'q_sel':
                    q_sel_A = Q_set_A.filter(thread__followed_by=user)
                    q_sel_A.cutoff_time = cutoff_time  #store cutoff time per query set
                    q_sel_B = Q_set_B.filter(thread__followed_by=user)
                    q_sel_B.cutoff_time = cutoff_time  #store cutoff time per query set

                elif feed.feed_type == 'q_ask':
                    q_ask_A = Q_set_A.filter(author=user)
                    q_ask_A.cutoff_time = cutoff_time
                    q_ask_B = Q_set_B.filter(author=user)
                    q_ask_B.cutoff_time = cutoff_time

                elif feed.feed_type == 'q_ans':
                    q_ans_A = Q_set_A.filter(thread__posts__author=user,
                                             thread__posts__post_type='answer')
                    q_ans_A = q_ans_A[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_ans_A.cutoff_time = cutoff_time

                    q_ans_B = Q_set_B.filter(thread__posts__author=user,
                                             thread__posts__post_type='answer')
                    q_ans_B = q_ans_B[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_ans_B.cutoff_time = cutoff_time

                elif feed.feed_type == 'q_all':
                    q_all_A = user.get_tag_filtered_questions(Q_set_A)
                    q_all_B = user.get_tag_filtered_questions(Q_set_B)

                    q_all_A = q_all_A[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_all_B = q_all_B[:askbot_settings.MAX_ALERTS_PER_EMAIL]
                    q_all_A.cutoff_time = cutoff_time
                    q_all_B.cutoff_time = cutoff_time

        #build ordered list questions for the email report
        q_list = SortedDict()

        #todo: refactor q_list into a separate class?
        extend_question_list(q_sel_A, q_list, languages=languages)
        extend_question_list(q_sel_B, q_list, languages=languages)

        #build list of comment and mention responses here
        #it is separate because posts are not marked as changed
        #when people add comments
        #mention responses could be collected in the loop above, but
        #it is inconvenient, because feed_type m_and_c bundles the two
        #also we collect metadata for these here
        try:
            feed = user_feeds.get(feed_type='m_and_c')
            if feed.should_send_now():
                cutoff_time = feed.get_previous_report_cutoff_time()
                comments = Post.objects.get_comments().filter(
                    added_at__lt=cutoff_time, ).exclude(author=user)
                q_commented = list()
                for c in comments:
                    post = c.parent
                    if post.author != user:
                        continue

                    #skip if post was seen by the user after
                    #the comment posting time
                    q_commented.append(post.get_origin_post())

                extend_question_list(q_commented,
                                     q_list,
                                     cutoff_time=cutoff_time,
                                     add_comment=True,
                                     languages=languages)

                mentions = Activity.objects.get_mentions(
                    mentioned_at__lt=cutoff_time, mentioned_whom=user)

                #print 'have %d mentions' % len(mentions)
                #MM = Activity.objects.filter(activity_type = const.TYPE_ACTIVITY_MENTION)
                #print 'have %d total mentions' % len(MM)
                #for m in MM:
                #    print m

                mention_posts = get_all_origin_posts(mentions)
                q_mentions_id = [q.id for q in mention_posts]

                q_mentions_A = Q_set_A.filter(id__in=q_mentions_id)
                q_mentions_A.cutoff_time = cutoff_time
                extend_question_list(q_mentions_A,
                                     q_list,
                                     add_mention=True,
                                     languages=languages)

                q_mentions_B = Q_set_B.filter(id__in=q_mentions_id)
                q_mentions_B.cutoff_time = cutoff_time
                extend_question_list(q_mentions_B,
                                     q_list,
                                     add_mention=True,
                                     languages=languages)
        except EmailFeedSetting.DoesNotExist:
            pass

        if user.email_tag_filter_strategy == const.INCLUDE_INTERESTING:
            extend_question_list(q_all_A, q_list, languages=languages)
            extend_question_list(q_all_B, q_list, languages=languages)

        extend_question_list(q_ask_A, q_list, limit=True, languages=languages)
        extend_question_list(q_ask_B, q_list, limit=True, languages=languages)

        extend_question_list(q_ans_A, q_list, limit=True, languages=languages)
        extend_question_list(q_ans_B, q_list, limit=True, languages=languages)

        if user.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
            extend_question_list(q_all_A,
                                 q_list,
                                 limit=True,
                                 languages=languages)
            extend_question_list(q_all_B,
                                 q_list,
                                 limit=True,
                                 languages=languages)

        ctype = ContentType.objects.get_for_model(Post)
        EMAIL_UPDATE_ACTIVITY = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT

        #up to this point we still don't know if emails about
        #collected questions were sent recently
        #the next loop examines activity record and decides
        #for each question, whether it needs to be included or not
        #into the report

        for q, meta_data in q_list.items():
            #this loop edits meta_data for each question
            #so that user will receive counts on new edits new answers, etc
            #and marks questions that need to be skipped
            #because an email about them was sent recently enough

            #also it keeps a record of latest email activity per question per user
            try:
                #todo: is it possible to use content_object here, instead of
                #content type and object_id pair?
                update_info = Activity.objects.get(
                    user=user,
                    content_type=ctype,
                    object_id=q.id,
                    activity_type=EMAIL_UPDATE_ACTIVITY)
                emailed_at = update_info.active_at
            except Activity.DoesNotExist:
                update_info = Activity(user=user,
                                       content_object=q,
                                       activity_type=EMAIL_UPDATE_ACTIVITY)
                emailed_at = datetime.datetime(1970, 1, 1)  #long time ago
            except Activity.MultipleObjectsReturned:
                raise Exception(
                    'server error - multiple question email activities '
                    'found per user-question pair')

            cutoff_time = meta_data[
                'cutoff_time']  #cutoff time for the question

            #skip question if we need to wait longer because
            #the delay before the next email has not yet elapsed
            #or if last email was sent after the most recent modification
            if emailed_at > cutoff_time or emailed_at > q.thread.last_activity_at:
                meta_data['skip'] = True
                continue

            #collect info on all sorts of news that happened after
            #the most recent emailing to the user about this question
            q_rev = q.revisions.filter(revised_at__gt=emailed_at)
            q_rev = q_rev.exclude(author=user)

            #now update all sorts of metadata per question
            meta_data['q_rev'] = len(q_rev)
            if len(q_rev) > 0 and q.added_at == q_rev[0].revised_at:
                meta_data['q_rev'] = 0
                meta_data['new_q'] = True
            else:
                meta_data['new_q'] = False

            new_ans = Post.objects.get_answers(user).filter(
                thread=q.thread,
                added_at__gt=emailed_at,
                deleted=False,
            )
            new_ans = new_ans.exclude(author=user)
            meta_data['new_ans'] = len(new_ans)

            ans_ids = Post.objects.get_answers(user).filter(
                thread=q.thread,
                added_at__gt=emailed_at,
                deleted=False,
            ).values_list('id', flat=True)
            ans_rev = PostRevision.objects.filter(post__id__in=ans_ids)
            ans_rev = ans_rev.exclude(author=user).distinct()
            meta_data['ans_rev'] = len(ans_rev)

            comments = meta_data.get('comments', 0)
            mentions = meta_data.get('mentions', 0)

            #print meta_data
            #finally skip question if there are no news indeed
            if len(q_rev) + len(new_ans) + len(
                    ans_rev) + comments + mentions == 0:
                meta_data['skip'] = True
                #print 'skipping'
            else:
                meta_data['skip'] = False
                #print 'not skipping'
                update_info.active_at = datetime.datetime.now()
                if not DEBUG_THIS_COMMAND:
                    update_info.save()  #save question email update activity
        #q_list is actually an ordered dictionary
        #print 'user %s gets %d' % (user.username, len(q_list.keys()))
        #todo: sort question list by update time
        return q_list
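
A hedged sketch of consuming the returned q_list; the owning object and the mailer are assumptions, while the meta_data keys come from the loop above:

q_list = command.get_updated_questions_for_user(user)  # 'command' is the assumed owner
for question, meta in q_list.items():
    if meta['skip']:
        continue
    # meta carries the 'new_q', 'q_rev', 'new_ans' and 'ans_rev' counts set above
    send_digest_email(user, question, meta)  # hypothetical mailer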
Ejemplo n.º 53
0
class Collector(object):
    def __init__(self, using):
        self.using = using
        # Initially, {model: set([instances])}, later values become lists.
        self.data = {}
        self.batches = {}  # {model: {field: set([instances])}}
        self.field_updates = {}  # {model: {(field, value): set([instances])}}
        self.dependencies = {}  # {model: set([models])}

    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted.  If the call is
        the result of a cascade, 'source' should be the model that caused it
        and 'nullable' should be set to True, if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, set())
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if new_objs and source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(source, set()).add(model)
        return new_objs

    def add_batch(self, model, field, objs):
        """
        Schedules a batch delete. Every instance of 'model' that is related to
        an instance in 'objs' through 'field' will be deleted.
        """
        self.batches.setdefault(model, {}).setdefault(field,
                                                      set()).update(objs)

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogeneous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(model, {}).setdefault((field, value),
                                                            set()).update(objs)

    def collect(self,
                objs,
                source=None,
                nullable=False,
                collect_related=True,
                source_attr=None,
                reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogeneous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.

        If 'reverse_dependency' is True, 'source' will be deleted before the
        current model, rather than after. (Needed for cascading to parent
        models, the one case in which the cascade follows the forwards
        direction of an FK rather than the reverse direction.)
        """
        new_objs = self.add(objs,
                            source,
                            nullable,
                            reverse_dependency=reverse_dependency)
        if not new_objs:
            return
        model = new_objs[0].__class__

        # Recursively collect parent models, but not their related objects.
        # These will be found by meta.get_all_related_objects()
        for parent_model, ptr in model._meta.parents.iteritems():
            if ptr:
                parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                self.collect(parent_objs,
                             source=model,
                             source_attr=ptr.rel.related_name,
                             collect_related=False,
                             reverse_dependency=True)

        if collect_related:
            for related in model._meta.get_all_related_objects(
                    include_hidden=True):
                field = related.field
                if related.model._meta.auto_created:
                    self.add_batch(related.model, field, new_objs)
                else:
                    sub_objs = self.related_objects(related, new_objs)
                    if not sub_objs:
                        continue
                    field.rel.on_delete(self, field, sub_objs, self.using)

            # TODO This entire block is only needed as a special case to
            # support cascade-deletes for GenericRelation. It should be
            # removed/fixed when the ORM gains a proper abstraction for virtual
            # or composite fields, and GFKs are reworked to fit into that.
            for relation in model._meta.many_to_many:
                if not relation.rel.through:
                    sub_objs = relation.bulk_related_objects(
                        new_objs, self.using)
                    self.collect(sub_objs,
                                 source=model,
                                 source_attr=relation.rel.related_name,
                                 nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation
        ``related``.
        """
        return related.model._base_manager.using(
            self.using).filter(**{"%s__in" % related.field.name: objs})

    def instances_with_model(self):
        for model, instances in self.data.iteritems():
            for obj in instances:
                yield model, obj

    def sort(self):
        # Topologically sort the models so that dependencies are deleted in a
        # safe order; bail out (leaving self.data unsorted) on a cycle.
        sorted_models = []
        models = self.data.keys()
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model)
                if not (dependencies
                        and dependencies.difference(sorted_models)):
                    sorted_models.append(model)
                    found = True
            if not found:
                return
        self.data = SortedDict([(model, self.data[model])
                                for model in sorted_models])

    @force_managed
    def delete(self):
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # If possible, bring the models into an order suitable for databases
        # that don't support transactions or cannot defer constraint checks
        # until the end of a transaction.
        self.sort()

        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(sender=model,
                                        instance=obj,
                                        using=self.using)

        # update fields
        for model, instances_for_fieldvalues in self.field_updates.iteritems():
            query = sql.UpdateQuery(model)
            for (field,
                 value), instances in instances_for_fieldvalues.iteritems():
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)

        # reverse instance collections
        for instances in self.data.itervalues():
            instances.reverse()

        # delete batches
        for model, batches in self.batches.iteritems():
            query = sql.DeleteQuery(model)
            for field, instances in batches.iteritems():
                query.delete_batch([obj.pk for obj in instances], self.using,
                                   field)

        # delete instances
        for model, instances in self.data.iteritems():
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)

        # send post_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.post_delete.send(sender=model,
                                         instance=obj,
                                         using=self.using)

        # update collected instances
        for model, instances_for_fieldvalues in self.field_updates.iteritems():
            for (field,
                 value), instances in instances_for_fieldvalues.iteritems():
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in self.data.iteritems():
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
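
# Usage sketch (an assumption, not part of the snippet above): this mirrors
# how Django's own Model.delete() drives a Collector. `entry` stands in for
# any model instance on the default database alias.
def delete_with_cascade(entry):
    from django.db import DEFAULT_DB_ALIAS

    collector = Collector(using=DEFAULT_DB_ALIAS)
    collector.collect([entry])  # gather `entry` plus everything that cascades
    collector.delete()          # signals, field updates, batched row deletes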
Example No. 54
class UnifiedIndex(object):
    # Used to collect all the indexes into a cohesive whole.
    def __init__(self, excluded_indexes=None):
        self.indexes = {}
        self.fields = SortedDict()
        self._build_lock = threading.RLock()
        self._built = False
        self._indexes_setup = False
        self.excluded_indexes = excluded_indexes or []
        self.excluded_indexes_ids = {}
        self.document_field = getattr(settings, 'HAYSTACK_DOCUMENT_FIELD',
                                      'text')
        self._fieldnames = {}
        self._facet_fieldnames = {}

    def collect_indexes(self):
        indexes = []

        for app in settings.INSTALLED_APPS:
            mod = importlib.import_module(app)

            try:
                search_index_module = importlib.import_module(
                    "%s.search_indexes" % app)
            except ImportError:
                if module_has_submodule(mod, 'search_indexes'):
                    raise

                continue

            for item_name, item in inspect.getmembers(search_index_module,
                                                      inspect.isclass):
                if getattr(item, 'haystack_use_for_indexing',
                           False) and getattr(item, 'get_model', None):
                    # We've got an index. Check if we should be ignoring it.
                    class_path = "%s.search_indexes.%s" % (app, item_name)

                    if class_path in self.excluded_indexes or self.excluded_indexes_ids.get(
                            item_name) == id(item):
                        self.excluded_indexes_ids[str(item_name)] = id(item)
                        continue

                    indexes.append(item())

        return indexes

    def reset(self):
        self.indexes = {}
        self.fields = SortedDict()
        self._built = False
        self._fieldnames = {}
        self._facet_fieldnames = {}

    def build(self, indexes=None):
        """
        Build/rebuild the index data. This class is not thread-safe when
        indexes are passed into this method, because it will trigger reset()
        and that clears a bunch of data that other threads might be using.

        Luckily, web requests never call this with indexes passed in, so
        there is no need to lock down access to every piece of data on this
        class. Only tests and management commands pass indexes in, and they
        don't use threads anyway.
        """
        with self._build_lock:
            if not self._built or indexes is not None:
                self._build(indexes=indexes)

    def _build(self, indexes=None):
        self.reset()

        if indexes is None:
            indexes = self.collect_indexes()

        for index in indexes:
            model = index.get_model()

            if model in self.indexes:
                raise ImproperlyConfigured(
                    "Model '%s' has more than one 'SearchIndex' handling it. "
                    "Please exclude either '%s' or '%s' using the "
                    "'HAYSTACK_EXCLUDED_INDEXES' setting." %
                    (model, self.indexes[model], index))

            self.indexes[model] = index
            self.collect_fields(index)

        self._built = True

    def collect_fields(self, index):
        for fieldname, field_object in index.fields.items():
            if field_object.document is True:
                if field_object.index_fieldname != self.document_field:
                    raise SearchFieldError(
                        "All 'SearchIndex' classes must use the same '%s' fieldname for the 'document=True' field. Offending index is '%s'."
                        % (self.document_field, index))

            # Stow the index_fieldname so we don't have to get it the hard way again.
            if fieldname in self._fieldnames and field_object.index_fieldname != self._fieldnames[
                    fieldname]:
                # We've already seen this field in the list. Raise an exception if index_fieldname differs.
                raise SearchFieldError(
                    "All uses of the '%s' field need to use the same 'index_fieldname' attribute."
                    % fieldname)

            self._fieldnames[fieldname] = field_object.index_fieldname

            # Stow the facet_fieldname so we don't have to look that up either.
            if hasattr(field_object, 'facet_for'):
                if field_object.facet_for:
                    self._facet_fieldnames[field_object.facet_for] = fieldname
                else:
                    self._facet_fieldnames[
                        field_object.instance_name] = fieldname

            # Copy the field in so we've got a unified schema.
            if field_object.index_fieldname not in self.fields:
                self.fields[field_object.index_fieldname] = copy.copy(
                    field_object)
            else:
                # If the field types are different, we can mostly
                # safely ignore this. The exception is ``MultiValueField``,
                # in which case we'll use it instead, copying over the
                # values.
                if field_object.is_multivalued:
                    old_field = self.fields[field_object.index_fieldname]
                    self.fields[field_object.index_fieldname] = copy.copy(
                        field_object)

                    # Switch it so we don't have to dupe the remaining
                    # checks.
                    field_object = old_field

                # We've already got this field in the list. Ensure that
                # what we hand back is a superset of all options that
                # affect the schema.
                if field_object.indexed is True:
                    self.fields[field_object.index_fieldname].indexed = True

                if field_object.stored is True:
                    self.fields[field_object.index_fieldname].stored = True

                if field_object.faceted is True:
                    self.fields[field_object.index_fieldname].faceted = True

                if field_object.use_template is True:
                    self.fields[
                        field_object.index_fieldname].use_template = True

                if field_object.null is True:
                    self.fields[field_object.index_fieldname].null = True

    def setup_indexes(self):
        if not self._built:
            self.build()

        if self._indexes_setup:
            return

        for model_ct, index in self.indexes.items():
            index._setup_save()
            index._setup_delete()

        self._indexes_setup = True

    def teardown_indexes(self):
        if not self._built:
            self.build()

        for model_ct, index in self.indexes.items():
            index._teardown_save()
            index._teardown_delete()

        self._indexes_setup = False

    def get_indexed_models(self):
        if not self._built:
            self.build()

        return self.indexes.keys()

    def get_index_fieldname(self, field):
        if not self._built:
            self.build()

        return self._fieldnames.get(field) or field

    def get_index(self, model_klass):
        if not self._built:
            self.build()

        if model_klass not in self.indexes:
            raise NotHandled('The model %s is not registered' % model_klass)

        return self.indexes[model_klass]

    def get_facet_fieldname(self, field):
        if not self._built:
            self.build()

        for fieldname, field_object in self.fields.items():
            if fieldname != field:
                continue

            if hasattr(field_object, 'facet_for'):
                if field_object.facet_for:
                    return field_object.facet_for
                else:
                    return field_object.instance_name
            else:
                return self._facet_fieldnames.get(field) or field

        return field

    def all_searchfields(self):
        if not self._built:
            self.build()

        return self.fields
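
# Usage sketch (an assumption, not part of the snippet above): build the
# unified schema once, then resolve the SearchIndex registered for a model;
# any model with a registered index will do.
def lookup_index(model_klass):
    unified = UnifiedIndex()
    unified.build()  # imports every installed app's search_indexes module
    return unified.get_index(model_klass)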
Example No. 55
    def handle_inspection(self, options):
        connection = connections[options.get('database')]
        # 'table_name_filter' is a stealth option
        table_name_filter = options.get('table_name_filter')

        def table2model(table_name):
            return table_name.title().replace('_', '').replace(
                ' ', '').replace('-', '')

        def strip_prefix(s):
            return s[1:] if s.startswith("u'") else s

        cursor = connection.cursor()
        yield "# This is an auto-generated Django model module."
        yield "# You'll have to do the following manually to clean this up:"
        yield "#   * Rearrange models' order"
        yield "#   * Make sure each model has one field with primary_key=True"
        yield "#   * Remove `managed = False` lines if you wish to allow Django to create and delete the table"
        yield "# Feel free to rename the models, but don't rename db_table values or field names."
        yield "#"
        yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'"
        yield "# into your database."
        yield "from __future__ import unicode_literals"
        yield ''
        yield 'from %s import models' % self.db_module
        yield ''
        known_models = []
        for table_name in connection.introspection.table_names(cursor):
            if table_name_filter is not None and callable(table_name_filter):
                if not table_name_filter(table_name):
                    continue
            yield 'class %s(models.Model):' % table2model(table_name)
            known_models.append(table2model(table_name))
            try:
                relations = connection.introspection.get_relations(
                    cursor, table_name)
            except NotImplementedError:
                relations = {}
            try:
                indexes = connection.introspection.get_indexes(
                    cursor, table_name)
            except NotImplementedError:
                indexes = {}
            used_column_names = []  # column names used in the table so far
            for i, row in enumerate(
                    connection.introspection.get_table_description(
                        cursor, table_name)):
                comment_notes = []  # Field notes, shown in a Python comment.
                extra_params = SortedDict()  # Field params such as 'db_column'.
                column_name = row[0]
                is_relation = i in relations

                att_name, params, notes = self.normalize_col_name(
                    column_name, used_column_names, is_relation)
                extra_params.update(params)
                comment_notes.extend(notes)

                used_column_names.append(att_name)

                # Add primary_key and unique, if necessary.
                if column_name in indexes:
                    if indexes[column_name]['primary_key']:
                        extra_params['primary_key'] = True
                    elif indexes[column_name]['unique']:
                        extra_params['unique'] = True

                if is_relation:
                    rel_to = "self" if relations[i][
                        1] == table_name else table2model(relations[i][1])
                    if rel_to in known_models:
                        field_type = 'ForeignKey(%s' % rel_to
                    else:
                        field_type = "ForeignKey('%s'" % rel_to
                else:
                    # Calling `get_field_type` to get the field type string and any
                    # additional paramters and notes.
                    field_type, field_params, field_notes = self.get_field_type(
                        connection, table_name, row)
                    extra_params.update(field_params)
                    comment_notes.extend(field_notes)

                    field_type += '('

                # Don't output 'id = meta.AutoField(primary_key=True)', because
                # that's assumed if it doesn't exist.
                if att_name == 'id' and field_type == 'AutoField(' and extra_params == {
                        'primary_key': True
                }:
                    continue

                # Add 'null' and 'blank', if the 'null_ok' flag was present in the
                # table description.
                if row[6]:  # If it's NULL...
                    if field_type == 'BooleanField(':
                        field_type = 'NullBooleanField('
                    else:
                        extra_params['blank'] = True
                        if field_type not in ('TextField(', 'CharField('):
                            extra_params['null'] = True

                field_desc = '%s = models.%s' % (att_name, field_type)
                if extra_params:
                    if not field_desc.endswith('('):
                        field_desc += ', '
                    field_desc += ', '.join([
                        '%s=%s' % (k, strip_prefix(repr(v)))
                        for k, v in extra_params.items()
                    ])
                field_desc += ')'
                if comment_notes:
                    field_desc += ' # ' + ' '.join(comment_notes)
                yield '    %s' % field_desc
            for meta_line in self.get_meta(table_name):
                yield meta_line
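
# For illustration (hypothetical output, not produced by the snippet above):
# given a `blog_post` table with a nullable varchar `title` column, the
# generator would yield roughly:
#
#     class BlogPost(models.Model):
#         title = models.CharField(max_length=255, blank=True)
#
# followed by whatever self.get_meta('blog_post') yields (typically a Meta
# class setting db_table = 'blog_post').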
Example No. 56
    def handle_noargs(self, **options):
        verbosity = int(options.get('verbosity'))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback')
        load_initial_data = options.get('load_initial_data')

        self.style = no_style()

        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError as exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith(
                        'No module named') or 'management' not in msg:
                    raise

        db = options.get('database')
        connection = connections[db]
        cursor = connection.cursor()

        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        pending_references = {}

        # Build the manifest of apps and models that are to be synchronized
        all_models = [(app.__name__.split('.')[-2], [
            m for m in models.get_models(app, include_auto_created=True)
            if router.allow_syncdb(db, m)
        ]) for app in models.get_apps()]

        def model_installed(model):
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                        (opts.auto_created and converter(
                            opts.auto_created._meta.db_table) in tables))

        manifest = SortedDict(
            (app_name, list(filter(model_installed, model_list)))
            for app_name, model_list in all_models)

        # Create the tables for each model
        if verbosity >= 1:
            self.stdout.write("Creating tables ...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 3:
                    self.stdout.write("Processing %s.%s model\n" %
                                      (app_name, model._meta.object_name))
                sql, references = connection.creation.sql_create_model(
                    model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(
                            connection.creation.sql_for_pending_references(
                                refto, self.style, pending_references))
                sql.extend(
                    connection.creation.sql_for_pending_references(
                        model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    self.stdout.write("Creating table %s\n" %
                                      model._meta.db_table)
                for statement in sql:
                    cursor.execute(statement)
                tables.append(
                    connection.introspection.table_name_converter(
                        model._meta.db_table))

        transaction.commit_unless_managed(using=db)

        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)

        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()

        if verbosity >= 1:
            self.stdout.write("Installing indexes ...\n")
        # Install SQL indices for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(
                        model, self.style)
                    if index_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing index for %s.%s model\n" %
                                (app_name, model._meta.object_name))
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)

        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        if verbosity >= 1:
            self.stdout.write("Installing custom SQL ...\n")
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style,
                                                      connection)
                    if custom_sql:
                        if verbosity >= 2:
                            self.stdout.write(
                                "Installing custom SQL for %s.%s model\n" %
                                (app_name, model._meta.object_name))
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception as e:
                            self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            if show_traceback:
                                traceback.print_exc()
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 3:
                            self.stdout.write(
                                "No custom SQL for %s.%s model\n" %
                                (app_name, model._meta.object_name))

        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            call_command('loaddata',
                         'initial_data',
                         verbosity=verbosity,
                         database=db,
                         skip_validation=True)
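
# Usage sketch (an assumption): this handler backs the old `syncdb` management
# command, so it is normally reached via manage.py or call_command, e.g.:
#
#     from django.core.management import call_command
#     call_command('syncdb', interactive=False, database='default')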
Example No. 57
def generate_model(model_description, mapping, db_key=''):
    """Uses instrospection to generate a Django model from a database table.
    """
    connection = db.connections[db_key]
    cursor = connection.cursor()

    table_name = model_description.name

    try:
        relations = connection.introspection.get_relations(cursor, table_name)
    except NotImplementedError:
        relations = {}
    try:
        indexes = connection.introspection.get_indexes(cursor, table_name)
    except NotImplementedError:
        indexes = {}
    used_column_names = []  # Holds column names used in the table so far
    for i, row in enumerate(
            connection.introspection.get_table_description(cursor,
                                                           table_name)):
        comment_notes = []  # Field notes, shown in a Python comment.
        extra_params = SortedDict()  # Field params such as 'db_column'.
        column_name = row[0]
        is_relation = i in relations

        att_name, params, notes = normalize_col_name(column_name,
                                                     used_column_names,
                                                     is_relation)
        extra_params.update(params)
        comment_notes.extend(notes)

        used_column_names.append(att_name)

        # Add primary_key and unique, if necessary.
        if column_name in indexes:
            if indexes[column_name]['primary_key']:
                extra_params['primary_key'] = True
            elif indexes[column_name]['unique']:
                extra_params['unique'] = True

        # Calling `get_field_type` to get the field type string and any
        # additional parameters and notes
        field_type, field_params, field_notes = get_field_type(
            connection, table_name, row)
        extra_params.update(field_params)
        comment_notes.extend(field_notes)

        GEOM_FIELDS = {
            'GEOMETRYCOLLECTION': 'GeometryCollectionField',
            'POINT': 'PointField',
            'MULTIPOINT': 'MultiPointField',
            'LINESTRING': 'LineStringField',
            'MULTILINESTRING': 'MultiLineStringField',
            'POLYGON': 'PolygonField',
            'MULTIPOLYGON': 'MultiPolygonField',
            'GEOMETRY': 'GeometryField',
        }

        geom_type = mapping['geom']

        # Use the geom_type to override the geometry field.
        if field_type == 'GeometryField':
            if geom_type in GEOM_FIELDS:
                field_type = GEOM_FIELDS[geom_type]

        # Change the type of id to AutoField to get auto generated ids.
        if att_name == 'id' and extra_params == {'primary_key': True}:
            field_type = 'AutoField'

        # Add 'null' and 'blank', if the 'null_ok' flag was present in the
        # table description.
        if row[6]:  # If it's NULL...
            if field_type == 'BooleanField':
                field_type = 'NullBooleanField'
            else:
                extra_params['blank'] = True
                if field_type not in ('TextField', 'CharField'):
                    extra_params['null'] = True

        if field_type and column_name != 'id':
            field, __ = Field.objects.get_or_create(model=model_description,
                                                    name=att_name)
            field.type = field_type
            field.original_name = mapping[column_name]

            field.save()

            for name, value in extra_params.items():
                if name:
                    Setting.objects.get_or_create(field=field,
                                                  name=name,
                                                  value=value)
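
# Usage sketch (an assumption; `ModelDescription` and the mapping contents
# are hypothetical): persist the fields of an introspected table, where the
# mapping carries the original column names plus the 'geom' geometry type.
#
#     desc = ModelDescription.objects.get(name='parcels')
#     generate_model(desc, mapping={'geom': 'POINT', 'name': 'NAME'})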
Example No. 58
def build_changelog(docs_path, package_name="mezzanine"):
    """
    Converts Mercurial commits into a changelog in RST format.
    """

    project_path = os.path.join(docs_path, "..")
    version_file = os.path.join(package_name, "__init__.py")
    version_var = "__version__"
    changelog_filename = "CHANGELOG"
    changelog_file = os.path.join(project_path, changelog_filename)
    versions = SortedDict()
    repo = None
    ignore = ("AUTHORS", "formatting", "typo", "pep8", "pep 8",
              "whitespace", "README", "trans", "print debug",
              "debugging", "tabs", "style", "sites", "ignore",
              "tweak", "cleanup", "minor", "for changeset",
              ".com``", "oops", "syntax")
    hotfixes = {
        "40cbc47b8d8a": "1.0.9",
        "a25749986abc": "1.0.10",
    }

    # Load the repo.
    try:
        from mercurial import ui, hg, error
        from mercurial.commands import tag
    except ImportError:
        pass
    else:
        try:
            ui = ui.ui()
            repo = hg.repository(ui, project_path)
        except error.RepoError:
            return
    if repo is None:
        return

    # Go through each changeset and assign it to the versions dict.
    changesets = [repo.changectx(changeset) for changeset in repo.changelog]
    for cs in sorted(changesets, reverse=True, key=_changeset_date):
        # Check if the file with the version number is in this changeset
        # and if it is, pull it out and assign it as a variable.
        files = cs.files()
        new_version = False
        # Commit message cleanup hacks.
        description = cs.description().decode("utf-8")
        description = description.rstrip(".").replace("\n", ". ")
        while "  " in description:
            description = description.replace("  ", " ")
        description = description.replace(". . ", ". ").replace("...", ",")
        while ".." in description:
            description = description.replace("..", ".")
        description = description.replace(":.", ":").replace("n'. t", "n't")
        words = description.split()
        # Format var names in commit.
        for i, word in enumerate(words):
            if (set("._") & set(word[:-1]) and set(letters) & set(word) and
                    "`" not in word and not word[0].isdigit()):
                last = ""
                if word[-1] in ",.":
                    last, word = word[-1], word[:-1]
                words[i] = "``%s``%s" % (word, last)
        description = " ".join(words)
        if version_file in files:
            for line in cs[version_file].data().split("\n"):
                if line.startswith(version_var):
                    # Under Python 2, exec in a function disables fast locals,
                    # so the variable this defines (and the locals() writes
                    # below) persist in this scope.
                    exec(line)
                    if locals()[version_var] == "0.1.0":
                        locals()[version_var] = "1.0.0"
                        break
                    versions[locals()[version_var]] = {
                        "changes": [],
                        "date": _changeset_date(cs).strftime("%b %d, %Y")
                    }
                    new_version = len(files) == 1

        # Tag new versions.
        hotfix = hotfixes.get(cs.hex()[:12])
        if hotfix or new_version:
            if hotfix:
                version_tag = hotfix
            else:
                try:
                    version_tag = locals()[version_var]
                except KeyError:
                    version_tag = None
            if version_tag and version_tag not in cs.tags():
                try:
                    tag(ui, repo, version_tag, rev=cs.hex())
                    print("Tagging version %s" % version_tag)
                except Exception:
                    pass

        # Ignore changesets that are merges, bumped the version, closed
        # a branch, regenerated the changelog itself, contain an ignore
        # word, or are one word long.
        merge = len(cs.parents()) > 1
        branch_closed = len(files) == 0
        changelog_update = changelog_filename in files
        ignored = [w for w in ignore if w.lower() in description.lower()]
        one_word = len(description.split()) == 1
        if (merge or new_version or branch_closed or changelog_update or
                ignored or one_word):
            continue
        # Ensure we have a current version and if so, add this changeset's
        # description to it.
        version = None
        try:
            version = locals()[version_var]
        except KeyError:
            if not hotfix:
                continue
        user = cs.user().decode("utf-8").split("<")[0].strip()
        entry = "%s - %s" % (description, user)
        if hotfix or entry not in versions[version]["changes"]:
            if hotfix:
                versions[hotfix] = {
                    "changes": [entry],
                    "date": _changeset_date(cs).strftime("%b %d, %Y"),
                }
            else:
                versions[version]["changes"].insert(0, entry)

    # Write out the changelog.
    with open(changelog_file, "w") as f:
        for version, version_info in versions.items():
            header = "Version %s (%s)" % (version, version_info["date"])
            f.write("%s\n" % header)
            f.write("%s\n" % ("-" * len(header)))
            f.write("\n")
            if version_info["changes"]:
                for change in version_info["changes"]:
                    f.write("  * %s\n" % change)
            else:
                f.write("  * No changes listed.\n")
            f.write("\n")
Example No. 59
    def rows(self):
        # ordered keys with default values
        keys = SortedDict([
            ('catiName', None),
            ('followedUp', 0),
            ('noFollowUpAfter6Days', 0),
            ('waitlisted', 0),
            ('transferredToTeamLeader', 0),
            ('timedOut', 0),
            ('workingDays', 0),
            ('followUpTime', 0),
            ('avgTimePerFollowUp', None),
        ])

        db = get_db()

        startdate = self.datespan.startdate_param_utc[:10]
        enddate = self.datespan.enddate_param_utc[:10]

        def get_form_data(user_id):
            row = db.view(
                'hsph/cati_performance',
                startkey=["followUpForm", self.domain, user_id, startdate],
                endkey=["followUpForm", self.domain, user_id, enddate],
                reduce=True,
                wrapper=lambda r: r['value']).first() or {}

            if row.get('followUpTime'):

                def fmt(seconds):  # avoid shadowing the builtin format()
                    return time.strftime('%M:%S', time.gmtime(seconds))

                row['avgTimePerFollowUp'] = fmt(row['followUpTime'] //
                                                row['followUpForms'])
                row['followUpTime'] = fmt(row['followUpTime'])

            row['workingDays'] = len(
                set(
                    db.view(
                        'hsph/cati_performance',
                        startkey=[
                            "submissionDay", self.domain, user_id, startdate
                        ],
                        endkey=[
                            "submissionDay", self.domain, user_id, enddate
                        ],
                        reduce=False,
                        wrapper=lambda r: r['value']['submissionDay']).all()))
            return row

        def get_case_data(group_id):
            row = db.view('hsph/cati_performance',
                          startkey=["all", self.domain, group_id, startdate],
                          endkey=["all", self.domain, group_id, enddate],
                          reduce=True,
                          wrapper=lambda r: r['value']).first() or {}

            # These queries can fail if startdate is less than N days before
            # enddate.  We just catch and supply a default value.
            extra_keys = [
                ('noFollowUpAfter6Days', 0, 13),
                ('timedOut', 0, 21),
            ]
            for key in extra_keys:
                key, default, days = key
                try:
                    row[key] = db.view(
                        'hsph/cati_performance',
                        startkey=[key, self.domain, group_id, startdate],
                        endkey=[
                            key, self.domain, group_id,
                            datestring_minus_days(enddate, days)
                        ],
                        reduce=True,
                        wrapper=lambda r: r['value'][key]).first()
                except restkit.errors.RequestFailed:
                    row[key] = default
            return row

        def sum_dicts(*args):
            res = {}
            for d in args:
                for k, v in d.items():
                    res[k] = res.get(k, 0) + (v or 0)
            return res
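        # For illustration: sum_dicts({'a': 1}, {'a': 2, 'b': None})
        # returns {'a': 3, 'b': 0}; None values count as zero.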

        rows = []
        cati_finder = CATIFinder(self.domain)

        for data in cati_finder.get_cati_users_data():
            user = data['user']
            row = get_form_data(user._id)
            row.update(
                sum_dicts(*[get_case_data(id) for id in data['group_ids']]))
            row['catiName'] = self.table_cell(user.raw_username,
                                              user.username_in_report)

            list_row = []
            for k, v in keys.items():
                val = row.get(k, v)
                if val is None:
                    val = '---'
                list_row.append(numeric_cell(val))

            rows.append(list_row)

        return rows
Example No. 60
def get_sql_for_new_models(apps=None, using=DEFAULT_DB_ALIAS):
    """
    Unashamedly copied and tweaked from django.core.management.commands.syncdb
    """
    connection = connections[using]
    
    # Get a list of already installed *models* so that references work right.
    tables = connection.introspection.table_names()
    seen_models = connection.introspection.installed_models(tables)
    created_models = set()
    pending_references = {}
    
    if apps:
        apps = [models.get_app(a) for a in apps]
    else:
        apps = models.get_apps()
    
    # Build the manifest of apps and models that are to be synchronized
    all_models = [
        (app.__name__.split('.')[-2], [
            m
            for m in models.get_models(app, include_auto_created=True)
            if router.allow_syncdb(using, m)
        ])
        for app in apps
    ]
    
    def model_installed(model):
        opts = model._meta
        converter = connection.introspection.table_name_converter
        db_table_in = (converter(opts.db_table) in tables)
        auto_create_in = (
            opts.auto_created and
            converter(opts.auto_created._meta.db_table) in tables
        )
        return not (db_table_in or auto_create_in)
    
    manifest = SortedDict(
        (app_name, filter(model_installed, model_list))
        for app_name, model_list in all_models
    )
    
    statements = []
    for app_name, model_list in manifest.items():
        for model in model_list:
            # Create the model's database table, if it doesn't already exist.
            sql, references = connection.creation.sql_create_model(
                model,
                no_style(),
                seen_models
            )
            
            seen_models.add(model)
            created_models.add(model)
            statements.append("### New Model: %s.%s" % (
                app_name,
                model._meta.object_name
            ))
            
            for refto, refs in references.items():
                pending_references.setdefault(refto, []).extend(refs)
                if refto in seen_models:
                    sql.extend(
                        connection.creation.sql_for_pending_references(
                            refto,
                            no_style(),
                            pending_references
                        )
                    )
            
            sql.extend(
                connection.creation.sql_for_pending_references(
                    model,
                    no_style(),
                    pending_references
                )
            )
            statements.extend(sql)
    
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                custom_sql = custom_sql_for_model(
                    model,
                    no_style(),
                    connection
                )
                
                if custom_sql:
                    statements.extend(custom_sql)
    
    for app_name, model_list in manifest.items():
        for model in model_list:
            if model in created_models:
                index_sql = connection.creation.sql_indexes_for_model(
                    model,
                    no_style()
                )
                
                if index_sql:
                    statements.extend(index_sql)
    
    return statements
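
# Usage sketch (an assumption): print the pending CREATE TABLE, custom SQL
# and index statements for a hypothetical app label.
if __name__ == "__main__":
    for statement in get_sql_for_new_models(apps=["myapp"]):
        print(statement)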