def _fill_related_objects_cache(self):
    """Build and store the related-objects caches for this Options instance.

    ``_related_objects_cache`` maps each related object pointing at this
    model to the parent model it was inherited from (or the model it was
    declared with); ``_related_objects_proxy_cache`` additionally includes
    relations targeting any model sharing this model's concrete model.
    """
    cache = SortedDict()
    parent_list = self.get_parent_list()
    # Inherit related objects from each parent, skipping parent links and
    # negatively-numbered fields whose model is outside the parent chain.
    for parent in self.parents:
        for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
            if (obj.field.creation_counter < 0
                    or obj.field.rel.parent_link) and obj.model not in parent_list:
                continue
            if model:
                cache[obj] = model
            else:
                cache[obj] = parent
    # Collect also objects which are in relation to some proxy
    # child/parent of self.
    proxy_cache = cache.copy()
    for klass in get_models(include_auto_created=True, only_installed=False):
        if klass._meta.swapped:
            continue
        for f in klass._meta.local_fields:
            if not f.rel or isinstance(f.rel.to, six.string_types):
                continue
            if self == f.rel.to._meta:
                cache[f.related] = None
                proxy_cache[f.related] = None
            elif self.concrete_model == f.rel.to._meta.concrete_model:
                proxy_cache[f.related] = None
    self._related_objects_cache = cache
    self._related_objects_proxy_cache = proxy_cache
def _fill_related_objects_cache(self):
    """Populate the two related-objects caches.

    The resulting caches map RelatedObject keys (each recording the table
    the relation points at) to None, or — for inherited relations — to the
    model/parent the relation was found on.
    """
    cache = SortedDict()
    parent_list = self.get_parent_list()
    for parent in self.parents:
        related = parent._meta.get_all_related_objects_with_model(include_hidden=True)
        for obj, model in related:
            # Drop parent links / auto fields pointing outside the chain.
            if (obj.field.creation_counter < 0
                    or obj.field.rel.parent_link) and obj.model not in parent_list:
                continue
            cache[obj] = model if model else parent
    # Collect also objects which are in relation to some proxy
    # child/parent of self.
    proxy_cache = cache.copy()
    for klass in get_models(include_auto_created=True, only_installed=False):
        if klass._meta.swapped:
            continue
        # Walk every locally-declared field of the candidate model.
        for f in klass._meta.local_fields:
            if f.rel and not isinstance(f.rel.to, six.string_types):
                # The field's relation targets this model directly.
                if self == f.rel.to._meta:
                    cache[RelatedObject(f.rel.to, klass, f)] = None
                    proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
                # See the notes in self.__init__() about self.concrete_model:
                # the relation targets a proxy sibling of this model.
                elif self.concrete_model == f.rel.to._meta.concrete_model:
                    proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
    self._related_objects_cache = cache
    self._related_objects_proxy_cache = proxy_cache
def export(self, queryset=None):
    """Export a resource as a ``tablib.Dataset``.

    Makes two passes over the queryset: the first discovers the full set
    of columns (the table shape), the second builds the actual rows.
    """
    if queryset is None:
        queryset = self.get_queryset()
    # First pass: learn every column any object contributes.
    fields = SortedDict()
    for pca in queryset.iterator():
        self.fill_row(pca, fields)
    self.headers = fields
    # Second pass creates rows from the known table shape. Iterate without
    # the queryset cache, to avoid wasting memory when exporting large
    # datasets.
    rows = []
    for pca in queryset.iterator():
        row = fields.copy()
        self.fill_row(pca, row)
        rows.append(row)
    data = tablib.Dataset(headers=fields.keys())
    for row in rows:
        data.append(row.values())
    return data
def _fill_related_objects_cache(self):
    """Compute and stash the related-object caches for this model's Options.

    Keys are RelatedObject instances describing reverse relations to this
    model; values are the inherited-from parent/model, or None for
    relations discovered by scanning all installed models.
    """
    cache = SortedDict()
    parent_list = self.get_parent_list()
    for parent in self.parents:
        pairs = parent._meta.get_all_related_objects_with_model(include_hidden=True)
        for obj, model in pairs:
            # Skip parent links and negative-counter fields whose model
            # falls outside this model's parent chain.
            hidden_parent_link = (obj.field.creation_counter < 0
                                  or obj.field.rel.parent_link)
            if hidden_parent_link and obj.model not in parent_list:
                continue
            cache[obj] = model or parent
    # Collect also objects which are in relation to some proxy
    # child/parent of self.
    proxy_cache = cache.copy()
    for klass in get_models(include_auto_created=True, only_installed=False):
        if klass._meta.swapped:
            continue
        for f in klass._meta.local_fields:
            if not f.rel or isinstance(f.rel.to, six.string_types):
                continue
            if self == f.rel.to._meta:
                cache[RelatedObject(f.rel.to, klass, f)] = None
                proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
            elif self.concrete_model == f.rel.to._meta.concrete_model:
                proxy_cache[RelatedObject(f.rel.to, klass, f)] = None
    self._related_objects_cache = cache
    self._related_objects_proxy_cache = proxy_cache
def dashboard(request):
    """Render the metas dashboard: per-project disbursements by month."""
    # Only members of these groups may view the dashboard.
    if request.user.groups.filter(name__in=['SPDT-Servidores', 'SSPLF']).count() <= 0:
        raise PermissionDenied
    desembolsos_max = 0
    matriz = SortedDict()
    dados = SortedDict()
    projetos = Projeto.objects.all()
    meses = Desembolso.objects.dates('data', 'month', 'DESC')[:6]
    colors = ['ffff00', 'cc7900', 'ff0000', '92d050', '006600', '0097cc',
              '002776', 'ae78d6', 'ff00ff', '430080', '28d75c', '0000ff',
              'fff200']
    # Seed a zeroed "month/year" -> total template, oldest month first.
    for date in reversed(meses):
        mes_ano = '%s/%s' % (date.month, date.year)
        dados[mes_ano] = 0
    # One (sigla, per-month totals) entry per project.
    for p in projetos:
        matriz[p.id] = (p.sigla, dados.copy())
    # Accumulate dollar totals per project per month, tracking the maximum.
    for date in meses:
        mes_ano = '%s/%s' % (date.month, date.year)
        totais = Desembolso.objects.filter(
            data__year=date.year,
            data__month=date.month).values('projeto').annotate(
                total_dolar=Sum('valor_dolar'))
        for d in totais:
            total = int(d['total_dolar'])
            if total > desembolsos_max:
                desembolsos_max = total
            matriz[d['projeto']][1][mes_ano] += total
    meses = ["%s/%s" % (m.month, m.year) for m in reversed(meses)]
    extra_context = {
        'desembolsos': matriz,
        'desembolsos_max': desembolsos_max,
        'meses': meses,
        'colors': ','.join(colors[:len(matriz)]),
    }
    return render_to_response('metas/dashboard.html', extra_context,
                              context_instance=RequestContext(request))
def download_selected_searchrequests(self, request, searchrequests):
    """Stream the selected search requests as a CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = (
        'attachment; filename=%s' % self.get_download_filename(request))
    writer = CsvUnicodeWriter(response, delimiter=',')
    # Column key -> localized header label; order defines the CSV layout.
    meta_data = (
        ('id', _('Search request number')),
        ('num_items', _('Number of items')),
        ('state', _('Status')),
        ('owner', _('Owner')),
        ('creation_date', _('Creation date')),
    )
    columns = SortedDict()
    for key, label in meta_data:
        columns[key] = label
    writer.writerow(columns.values())
    for searchrequest in searchrequests:
        # Copy the column template so values land in header order.
        row = columns.copy()
        row['id'] = searchrequest.id
        row['creation_date'] = format_datetime(searchrequest.date_created,
                                               'DATETIME_FORMAT')
        row['state'] = searchrequest.state
        row['num_items'] = searchrequest.num_items
        row['owner'] = searchrequest.owner.email
        writer.writerow([six.text_type(value).encode('utf8')
                         for value in row.values()])
    return response
def download_selected_quotes(self, request, quotes):
    """Stream the selected quotes as a CSV attachment.

    Bug fix: the header keys used 'shipping_total_inc_tax' /
    'grand_total_inc_tax' while the row writer assigned
    'shipping_total_incl_tax' / 'grand_total_incl_tax', so those header
    columns were never filled (they kept the label placeholder) and two
    unlabelled value columns were appended to every row. A 'date_updated'
    value was also written without a matching header, appending yet
    another misaligned column. Keys are now consistent and only declared
    columns are written.
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s' \
        % self.get_download_filename(request)
    writer = CsvUnicodeWriter(response, delimiter=',')
    # Column key -> localized header label; order defines the CSV layout.
    # Keys must match the row assignments below exactly.
    meta_data = (
        ('id', _('Quote number')),
        ('search_request', _('Search request')),
        ('state', _('Status')),
        ('owner', _('Owner')),
        ('date_created', _('Creation date')),
        ('base_total_excl_tax', _('Base total (excl tax)')),
        ('base_total_incl_tax', _('Base total (inc tax)')),
        ('shipping_total_excl_tax', _('Shipping total (excl tax)')),
        ('shipping_total_incl_tax', _('Shipping total (incl tax)')),
        ('grand_total_excl_tax', _('Grand total (excl tax)')),
        ('grand_total_incl_tax', _('Grand total (incl tax)')),
        ('warranty_days', _('Warranty days')),
        ('shipping_days', _('Shipping days')),
    )
    columns = SortedDict()
    for k, v in meta_data:
        columns[k] = v
    writer.writerow(columns.values())
    for quote in quotes:
        row = columns.copy()
        row['id'] = quote.id
        row['search_request'] = quote.search_request
        row['state'] = quote.state
        row['owner'] = quote.owner.email
        row['date_created'] = format_datetime(quote.date_created,
                                              'DATETIME_FORMAT')
        row['base_total_excl_tax'] = quote.base_total_excl_tax
        row['base_total_incl_tax'] = quote.base_total_incl_tax
        row['shipping_total_excl_tax'] = quote.shipping_total_excl_tax
        row['shipping_total_incl_tax'] = quote.shipping_total_incl_tax
        row['grand_total_excl_tax'] = quote.grand_total_excl_tax
        row['grand_total_incl_tax'] = quote.grand_total_incl_tax
        row['warranty_days'] = quote.warranty_days
        row['shipping_days'] = quote.shipping_days
        encoded_values = [six.text_type(value).encode('utf8')
                          for value in row.values()]
        writer.writerow(encoded_values)
    return response
def download_selected_orders(self, request, orders):
    """Stream the selected orders as a CSV attachment (Python 2 era:
    uses the deprecated ``mimetype`` kwarg and the ``unicode`` builtin)."""
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = (
        'attachment; filename=%s' % self.get_download_filename(request))
    writer = CsvUnicodeWriter(response, delimiter=',')
    # Column key -> localized header label; order defines the CSV layout.
    meta_data = (
        ('number', _('Order number')),
        ('value', _('Order value')),
        ('date', _('Date of purchase')),
        ('num_items', _('Number of items')),
        ('status', _('Order status')),
        ('customer', _('Customer email address')),
        ('shipping_address', _('Shipping address')),
        ('billing_address', _('Billing address')),
    )
    columns = SortedDict()
    for key, label in meta_data:
        columns[key] = label
    writer.writerow(columns.values())
    for order in orders:
        row = columns.copy()
        row['number'] = order.number
        row['value'] = order.total_incl_tax
        row['date'] = format_date(order.date_placed, 'DATETIME_FORMAT')
        row['num_items'] = order.num_items
        row['status'] = order.status
        row['customer'] = order.email
        # Addresses may be absent; emit an empty cell rather than None.
        row['shipping_address'] = order.shipping_address if order.shipping_address else ''
        row['billing_address'] = order.billing_address if order.billing_address else ''
        writer.writerow([unicode(value).encode('utf8')
                         for value in row.values()])
    return response
def download_selected_invoices(self, request, invoices):
    """Stream the selected invoices as a CSV attachment."""
    response = http.HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s' \
        % self.get_download_filename(request)
    writer = UnicodeCSVWriter(open_file=response)
    # Column key -> localized header label; order defines the CSV layout.
    meta_data = (
        ('number', _('Invoice number')),
        ('value', _('Invoice value')),
        ('date', _('Date of purchase')),
        ('order_number', _('Order number')),
        ('num_items', _('Number of items')),
        ('status', _('Invoice status')),
        ('customer', _('Customer email address')),
        ('person_name', _('Pay as juristic person')),
        ('person_vatin', _('Juristic person VAT number')),
        ('person_code', _('Juristic person code (e.g. KPP in Russia)')),
    )
    columns = SortedDict()
    for key, label in meta_data:
        columns[key] = label
    writer.writerow(columns.values())
    for invoice in invoices:
        row = columns.copy()
        row['number'] = invoice.number
        row['value'] = invoice.total_incl_tax
        row['date'] = format_datetime(invoice.date_created, 'DATETIME_FORMAT')
        row['order_number'] = invoice.order_number
        row['num_items'] = invoice.order.num_items
        row['status'] = invoice.status
        row['customer'] = invoice.order.email
        person = invoice.person
        if person:
            row['person_name'] = person.name
            row['person_vatin'] = person.vatin
            row['person_code'] = person.reason_code
        else:
            # Placeholders for invoices not paid as a juristic person.
            row['person_name'] = '<none>'
            row['person_vatin'] = '<none>'
            row['person_code'] = '<none>'
        writer.writerow(row.values())
    return response
def dashboard(request):
    """Dashboard view: monthly disbursement totals per project."""
    allowed = request.user.groups.filter(name__in=['SPDT-Servidores', 'SSPLF'])
    # Deny access unless the user belongs to one of the allowed groups.
    if allowed.count() <= 0:
        raise PermissionDenied
    desembolsos_max = 0
    matriz = SortedDict()
    dados = SortedDict()
    projetos = Projeto.objects.all()
    meses = Desembolso.objects.dates('data', 'month', 'DESC')[:6]
    colors = [
        'ffff00', 'cc7900', 'ff0000', '92d050', '006600', '0097cc', '002776',
        'ae78d6', 'ff00ff', '430080', '28d75c', '0000ff', 'fff200'
    ]
    # Template of month -> 0 totals, oldest month first.
    for date in reversed(meses):
        dados['%s/%s' % (date.month, date.year)] = 0
    for p in projetos:
        matriz[p.id] = (p.sigla, dados.copy())
    # Sum dollar values per project for each month; remember the maximum.
    for date in meses:
        mes_ano = '%s/%s' % (date.month, date.year)
        aggregates = Desembolso.objects.filter(
            data__year=date.year, data__month=date.month).values(
                'projeto').annotate(total_dolar=Sum('valor_dolar'))
        for d in aggregates:
            valor = int(d['total_dolar'])
            if valor > desembolsos_max:
                desembolsos_max = valor
            matriz[d['projeto']][1][mes_ano] += valor
    meses = ["%s/%s" % (m.month, m.year) for m in reversed(meses)]
    extra_context = {
        'desembolsos': matriz,
        'desembolsos_max': desembolsos_max,
        'meses': meses,
        'colors': ','.join(colors[:len(matriz)]),
    }
    return render_to_response('metas/dashboard.html', extra_context,
                              context_instance=RequestContext(request))
def download_selected_orders(self, request, orders):
    """Stream the selected orders as a CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s' \
        % self.get_download_filename(request)
    writer = CsvUnicodeWriter(response, delimiter=',')
    # Column key -> localized header label; order defines the CSV layout.
    meta_data = (
        ('number', _('Order number')),
        ('value', _('Order value')),
        ('date', _('Date of purchase')),
        ('num_items', _('Number of items')),
        ('status', _('Order status')),
        ('customer', _('Customer email address')),
        ('shipping_address_name', _('Deliver to name')),
        ('billing_address_name', _('Bill to name')),
    )
    columns = SortedDict()
    for key, label in meta_data:
        columns[key] = label
    writer.writerow(columns.values())
    for order in orders:
        row = columns.copy()
        row['number'] = order.number
        row['value'] = order.total_incl_tax
        row['date'] = format_datetime(order.date_placed, 'DATETIME_FORMAT')
        row['num_items'] = order.num_items
        row['status'] = order.status
        row['customer'] = order.email
        # Addresses may be absent; emit an empty cell rather than None.
        if order.shipping_address:
            row['shipping_address_name'] = order.shipping_address.name
        else:
            row['shipping_address_name'] = ''
        if order.billing_address:
            row['billing_address_name'] = order.billing_address.name
        else:
            row['billing_address_name'] = ''
        writer.writerow([six.text_type(value).encode('utf8')
                         for value in row.values()])
    return response
def download_selected_invoices(self, request, invoices):
    """Write the selected invoices to the response as CSV."""
    response = http.HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s' \
        % self.get_download_filename(request)
    writer = UnicodeCSVWriter(open_file=response)
    # (key, localized header) pairs; tuple order fixes the column order.
    meta_data = (
        ('number', _('Invoice number')),
        ('value', _('Invoice value')),
        ('date', _('Date of purchase')),
        ('order_number', _('Order number')),
        ('num_items', _('Number of items')),
        ('status', _('Invoice status')),
        ('customer', _('Customer email address')),
        ('person_name', _('Pay as juristic person')),
        ('person_vatin', _('Juristic person VAT number')),
        ('person_code', _('Juristic person code (e.g. KPP in Russia)')),
    )
    columns = SortedDict()
    for k, v in meta_data:
        columns[k] = v
    writer.writerow(columns.values())
    for invoice in invoices:
        row = columns.copy()
        row['number'] = invoice.number
        row['value'] = invoice.total_incl_tax
        row['date'] = format_datetime(invoice.date_created,
                                      'DATETIME_FORMAT')
        row['order_number'] = invoice.order_number
        row['num_items'] = invoice.order.num_items
        row['status'] = invoice.status
        row['customer'] = invoice.order.email
        if invoice.person:
            row['person_name'] = invoice.person.name
            row['person_vatin'] = invoice.person.vatin
            row['person_code'] = invoice.person.reason_code
        else:
            # No juristic person attached to this invoice.
            row['person_name'] = '<none>'
            row['person_vatin'] = '<none>'
            row['person_code'] = '<none>'
        writer.writerow(row.values())
    return response
def download_selected_orders(self, request, orders):
    """Stream the selected orders as a CSV attachment.

    Note: this variant hands the row mapping straight to the writer
    (``writer.writerow(row)``) rather than pre-encoding the values.
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s' \
        % self.get_download_filename(request)
    writer = UnicodeCSVWriter(open_file=response)
    # Column key -> localized header label; order defines the CSV layout.
    meta_data = (
        ('number', _('Order number')),
        ('value', _('Order value')),
        ('date', _('Date of purchase')),
        ('num_items', _('Number of items')),
        ('status', _('Order status')),
        ('customer', _('Customer email address')),
        ('shipping_address_name', _('Deliver to name')),
        ('billing_address_name', _('Bill to name')),
    )
    columns = SortedDict()
    for key, label in meta_data:
        columns[key] = label
    writer.writerow(columns.values())
    for order in orders:
        row = columns.copy()
        row['number'] = order.number
        row['value'] = order.total_incl_tax
        row['date'] = format_datetime(order.date_placed, 'DATETIME_FORMAT')
        row['num_items'] = order.num_items
        row['status'] = order.status
        row['customer'] = order.email
        # Addresses may be absent; emit an empty cell rather than None.
        if order.shipping_address:
            row['shipping_address_name'] = order.shipping_address.name
        else:
            row['shipping_address_name'] = ''
        if order.billing_address:
            row['billing_address_name'] = order.billing_address.name
        else:
            row['billing_address_name'] = ''
        writer.writerow(row)
    return response
def download_selected_orders(self, request, orders):
    """Export the selected orders to CSV and return the HTTP response."""
    response = HttpResponse(content_type="text/csv")
    filename = self.get_download_filename(request)
    response["Content-Disposition"] = "attachment; filename=%s" % filename
    writer = UnicodeCSVWriter(open_file=response)
    # (key, localized header) pairs; tuple order fixes the column order.
    meta_data = (
        ("number", _("Order number")),
        ("value", _("Order value")),
        ("date", _("Date of purchase")),
        ("num_items", _("Number of items")),
        ("status", _("Order status")),
        ("customer", _("Customer email address")),
        ("shipping_address_name", _("Deliver to name")),
        ("billing_address_name", _("Bill to name")),
    )
    columns = SortedDict()
    for k, v in meta_data:
        columns[k] = v
    writer.writerow(columns.values())
    for order in orders:
        row = columns.copy()
        row["number"] = order.number
        row["value"] = order.total_incl_tax
        row["date"] = format_datetime(order.date_placed, "DATETIME_FORMAT")
        row["num_items"] = order.num_items
        row["status"] = order.status
        row["customer"] = order.email
        # Blank cells for missing addresses.
        row["shipping_address_name"] = (
            order.shipping_address.name if order.shipping_address else "")
        row["billing_address_name"] = (
            order.billing_address.name if order.billing_address else "")
        writer.writerow(row)
    return response
def download_selected_orders(self, request, orders):
    """Export the selected orders as ``orders.csv`` (legacy Python 2
    variant: plain ``csv.writer``, un-localized headers, ``unicode``)."""
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename=orders.csv'
    writer = csv.writer(response, delimiter=',')
    # Column key -> plain header label; order defines the CSV layout.
    meta_data = (
        ('number', 'Order number'),
        ('value', 'Order value'),
        ('date', 'Date of purchase'),
        ('num_items', 'Number of items'),
        ('status', 'Order status'),
        ('shipping_address_name', 'Deliver to name'),
        ('billing_address_name', 'Bill to name'),
    )
    columns = SortedDict()
    for key, label in meta_data:
        columns[key] = label
    writer.writerow(columns.values())
    for order in orders:
        row = columns.copy()
        row['number'] = order.number
        row['value'] = order.total_incl_tax
        row['date'] = order.date_placed
        row['num_items'] = order.num_items
        row['status'] = order.status
        # NOTE: here name() is a method on the address objects, not an
        # attribute as in other admin variants of this export.
        row['shipping_address_name'] = (
            order.shipping_address.name() if order.shipping_address else '')
        row['billing_address_name'] = (
            order.billing_address.name() if order.billing_address else '')
        writer.writerow([unicode(value).encode('utf8')
                         for value in row.values()])
    return response
) return query, params def _format_row(self, row): return _format_row_with_out_of_dateness(self.locale, *row) # L10n Dashboard tables that have their own whole-page views: L10N_READOUTS = SortedDict( (t.slug, t) for t in [MostVisitedTranslationsReadout, TemplateTranslationsReadout, UnreviewedReadout]) # Contributors ones: CONTRIBUTOR_READOUTS = SortedDict( (t.slug, t) for t in [MostVisitedDefaultLanguageReadout, TemplateReadout, HowToContributeReadout, AdministrationReadout, UnreviewedReadout, NeedsChangesReadout, UnreadyForLocalizationReadout, UnhelpfulReadout]) # All: READOUTS = L10N_READOUTS.copy() READOUTS.update(CONTRIBUTOR_READOUTS) GROUP_L10N_READOUTS = SortedDict( (t.slug, t) for t in [MostVisitedTranslationsReadout, UnreviewedReadout]) # English group locale is the same as l10n dashboard. GROUP_CONTRIBUTOR_READOUTS = CONTRIBUTOR_READOUTS
class SortedDictTests(SimpleTestCase):
    """Exercise the insertion-ordering guarantees of SortedDict."""

    def setUp(self):
        super(SortedDictTests, self).setUp()
        self.d1 = SortedDict()
        self.d1[7] = "seven"
        self.d1[1] = "one"
        self.d1[9] = "nine"
        self.d2 = SortedDict()
        self.d2[1] = "one"
        self.d2[9] = "nine"
        self.d2[0] = "nil"
        self.d2[7] = "seven"

    def test_basic_methods(self):
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
        self.assertEqual(list(six.itervalues(self.d1)),
                         ["seven", "one", "nine"])
        self.assertEqual(list(six.iteritems(self.d1)),
                         [(7, "seven"), (1, "one"), (9, "nine")])

    def test_overwrite_ordering(self):
        """Overwriting an item keeps its place."""
        self.d1[1] = "ONE"
        self.assertEqual(list(six.itervalues(self.d1)),
                         ["seven", "ONE", "nine"])

    def test_append_items(self):
        """New items go to the end."""
        self.d1[0] = "nil"
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
        self.d2[7] = "lucky number 7"
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])

    if six.PY2:
        def test_change_keys(self):
            """
            Changing the keys won't do anything, it's only a copy of the
            keys dict.

            This test doesn't make sense under Python 3 because keys is
            an iterator.
            """
            k = self.d2.keys()
            k.remove(9)
            self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first
        one.

        A real dict will actually take the second value so we will too,
        but we'll keep the ordering from the first key found.
        """
        tuples = ((2, "two"), (1, "one"), (2, "second-two"))
        d = SortedDict(tuples)
        self.assertEqual(list(six.iterkeys(d)), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(six.itervalues(real_dict)),
                         ["one", "second-two"])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(list(six.itervalues(d)), ["second-two", "one"])

    def test_overwrite(self):
        self.d1[1] = "not one"
        self.assertEqual(self.d1[1], "not one")
        self.assertEqual(list(six.iterkeys(self.d1)),
                         list(six.iterkeys(self.d1.copy())))

    def test_append(self):
        self.d1[13] = "thirteen"
        self.assertEqual(
            repr(self.d1),
            "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}")

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, "missing"), "one")
        self.assertEqual(self.d1.pop(1, "missing"), "missing")
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        d = SortedDict((i, i) for i in range(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        self.assertEqual(
            pickle.loads(pickle.dumps(self.d1, 2)),
            {7: "seven", 1: "one", 9: "nine"})

    def test_copy(self):
        orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        copied = copy.copy(orig)
        self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
        self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        self.assertEqual(self.d1.keyOrder, [])

    def test_reversed(self):
        self.assertEqual(list(self.d1), [7, 1, 9])
        self.assertEqual(list(self.d2), [1, 9, 0, 7])
        self.assertEqual(list(reversed(self.d1)), [9, 1, 7])
        self.assertEqual(list(reversed(self.d2)), [7, 0, 9, 1])
PLATFORM_SUN.id: PLATFORM_SUN, PLATFORM_ALL_MOBILE.id: PLATFORM_ALL_MOBILE, PLATFORM_ANDROID.id: PLATFORM_ANDROID, PLATFORM_MAEMO.id: PLATFORM_MAEMO } MOBILE_PLATFORMS = SortedDict([(PLATFORM_ALL_MOBILE.id, PLATFORM_ALL_MOBILE), (PLATFORM_ANDROID.id, PLATFORM_ANDROID), (PLATFORM_MAEMO.id, PLATFORM_MAEMO)]) DESKTOP_PLATFORMS = SortedDict([(PLATFORM_ALL.id, PLATFORM_ALL), (PLATFORM_LINUX.id, PLATFORM_LINUX), (PLATFORM_MAC.id, PLATFORM_MAC), (PLATFORM_WIN.id, PLATFORM_WIN)]) SUPPORTED_PLATFORMS = DESKTOP_PLATFORMS.copy() SUPPORTED_PLATFORMS.update(MOBILE_PLATFORMS) PLATFORM_DICT = { 'all': PLATFORM_ALL, 'linux': PLATFORM_LINUX, 'mac': PLATFORM_MAC, 'macosx': PLATFORM_MAC, 'darwin': PLATFORM_MAC, 'bsd': PLATFORM_BSD, 'bsd_os': PLATFORM_BSD, 'win': PLATFORM_WIN, 'winnt': PLATFORM_WIN, 'windows': PLATFORM_WIN, 'sun': PLATFORM_SUN, 'sunos': PLATFORM_SUN,
class Chart(object):
    """Builder for Google Chart API image URLs."""

    BASE = "http://chart.apis.google.com/chart"
    defaults = {"chs": "200x200", "cht": "lc"}

    def __init__(self):
        # Use a SortedDict for the options so they are added in a
        # deterministic manner; this eases things like dealing with cache
        # keys or writing unit tests.
        self.options = SortedDict()
        self.datasets = []
        self.axes = []
        self.datarange = None
        self.alt = None

    def clone(self):
        """Return a copy with independent options/datasets/axes containers."""
        clone = self.__class__()
        clone.options = self.options.copy()
        clone.datasets = self.datasets[:]
        clone.axes = self.axes[:]
        return clone

    def img(self):
        """Render an HTML <img> tag pointing at this chart's URL."""
        url = self.url()
        width, height = self.options["chs"].split("x")
        alt = 'alt="%s" ' % escape(self.alt) if self.alt else ''
        return mark_safe('<img src="%s" width="%s" height="%s" %s/>'
                         % (escape(url), width, height, alt))

    def url(self):
        """Assemble the full chart URL: options, encoded data, axis specs."""
        if self.options.get('cht', None) == 't':
            self.datasets.append(self.options.pop("_mapdata"))
        # Figure out the chart's data range
        if not self.datarange:
            if self.datasets == [[]]:
                maxvalue = 0
                minvalue = 0
            else:
                maxvalue = max(max(d) for d in self.datasets if d)
                minvalue = min(min(d) for d in self.datasets if d)
            self.datarange = (minvalue, maxvalue)
        # Encode data
        if "chds" in self.options or self.options.get('cht', None) == 'gom':
            # text encoding if scaling provided, or for google-o-meter type
            data = "|".join(encode_text(d) for d in self.datasets)
            encoded_data = "t:%s" % data
        else:
            # extended encoding otherwise
            data = extended_separator.join(
                encode_extended(d, self.datarange) for d in self.datasets)
            encoded_data = "e:%s" % data
        # Fill in any defaults the caller did not override.
        for k in self.defaults:
            if k not in self.options:
                self.options[k] = self.defaults[k]
        # Start to calculate the URL
        url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options),
                                encoded_data)
        # Calculate axis options
        if self.axes:
            axis_options = SortedDict()
            axis_sides = []
            for i, axis in enumerate(self.axes):
                axis_sides.append(axis.side)
                for opt in axis.options:
                    axis_options.setdefault(opt, []).append(
                        axis.options[opt] % i)
            # Turn the option lists into strings
            axis_sides = smart_join(",", *axis_sides)
            for opt in axis_options:
                axis_options[opt] = smart_join("|", *axis_options[opt])
            url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
        return url
def map_sum(request):
    """Render a PDF totalisation of Casas grouped by region and state."""
    # Filter Casas according to the request parameters.
    param = get_params(request)
    casas = filtrar_casas(**param)
    # Build zeroed totalisation records.
    tot_servicos = SortedDict()
    tot_projetos = SortedDict()
    tot_diagnosticos = SortedDict()
    for ts in TipoServico.objects.all():
        tot_servicos[ts.sigla] = 0
    for pr in Projeto.objects.all():
        tot_projetos[pr.sigla] = 0
    tot_convenios = tot_projetos.copy()
    tot_equipadas = tot_projetos.copy()
    tot_diagnosticos['A'] = 0
    tot_diagnosticos['P'] = 0
    # Build the result rows keyed by region, then by state.
    result = {}
    ufs = UnidadeFederativa.objects.filter(
        Q(regiao__in=param['regioes']) | Q(sigla__in=param['estados'])
    ).order_by('regiao', 'nome')
    for uf in ufs:
        if uf.regiao not in result:
            result[uf.regiao] = {
                'nome': uf.get_regiao_display(),
                'ufs': {},
                'servicos': tot_servicos.copy(),
                'convenios': tot_projetos.copy(),
                'equipadas': tot_projetos.copy(),
                'diagnosticos': tot_diagnosticos.copy(),
            }
        result[uf.regiao]['ufs'][uf.codigo_ibge] = {
            'nome': uf.nome,
            'servicos': tot_servicos.copy(),
            'convenios': tot_projetos.copy(),
            'equipadas': tot_projetos.copy(),
            'diagnosticos': tot_diagnosticos.copy(),
        }
    # Process the filtered Casas, accumulating into the global, region
    # and state counters (regiao/estado alias the nested dicts).
    for casa in casas.distinct():
        uf = casa.municipio.uf
        regiao = result[uf.regiao]
        estado = regiao['ufs'][uf.codigo_ibge]
        for s in casa.servico_set.all():
            sigla = s.tipo_servico.sigla
            tot_servicos[sigla] += 1
            regiao['servicos'][sigla] += 1
            estado['servicos'][sigla] += 1
        for c in casa.convenio_set.all():
            sigla = c.projeto.sigla
            tot_convenios[sigla] += 1
            regiao['convenios'][sigla] += 1
            estado['convenios'][sigla] += 1
            # Only count as "equipada" when equipped AND formally accepted.
            if c.equipada and c.data_termo_aceite is not None:
                tot_equipadas[sigla] += 1
                regiao['equipadas'][sigla] += 1
                estado['equipadas'][sigla] += 1
        for d in casa.diagnostico_set.all():
            # 'P' = published diagnostics, 'A' = pending/unpublished.
            key = 'P' if d.publicado else 'A'
            tot_diagnosticos[key] += 1
            regiao['diagnosticos'][key] += 1
            estado['diagnosticos'][key] += 1
    extra_context = {
        'pagesize': 'a4 landscape',
        'servicos': TipoServico.objects.all(),
        'projetos': Projeto.objects.all(),
        'result': result,
        'tot_servicos': tot_servicos,
        'tot_convenios': tot_convenios,
        'tot_equipadas': tot_equipadas,
        'tot_diagnosticos': tot_diagnosticos,
    }
    return render_to_pdf('metas/map_sum.html', extra_context)
def make_missing_data_form(instance, required_fields=None):
    """Build a form class asking only for the profile data *instance* lacks.

    Starts from the full catalogue of profile/payment fields, then prunes
    every field whose value the instance already has (password, avatar,
    credit card, ...) and every field not listed in ``required_fields``
    (when given).  The surviving fields, plus bound ``save``/``clean_*``
    methods, are assembled into a ``BetterForm`` subclass.

    :param instance: a Patron-like object (may be None for a fresh signup).
    :param required_fields: optional whitelist of field names to keep.
    :return: ``(has_fields, form_class)`` where ``has_fields`` is True when
        at least one field remains to be filled in.
    """
    # Fix: the original used a mutable default argument (``required_fields=[]``),
    # a shared-state pitfall; use the None sentinel instead (same observable
    # behavior for all callers).
    if required_fields is None:
        required_fields = []
    fields = SortedDict({
        'is_professional': forms.BooleanField(
            label=_(u"Professionnel"),
            required=False,
            initial=False,
            widget=CommentedCheckboxInput(info_text='Je suis professionnel')),
        'company_name': forms.CharField(
            label=_(u"Nom de la société"),
            required=False,
            max_length=255,
            widget=forms.TextInput(attrs={'class': 'inm'})),
        'username': forms.RegexField(
            label=_(u"Pseudo"),
            max_length=30,
            regex=r'^[\w.@+-]+$',
            help_text=_("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
            error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")},
            widget=forms.TextInput(attrs={'class': 'inm'})),
        'password1': forms.CharField(
            label=_(u"Mot de passe"),
            max_length=128,
            required=True,
            widget=forms.PasswordInput(attrs={'class': 'inm'})),
        'password2': forms.CharField(
            label=_(u"Mot de passe à nouveau"),
            max_length=128,
            required=True,
            widget=forms.PasswordInput(attrs={'class': 'inm'})),
        'first_name': forms.CharField(
            label=_(u"Prénom"),
            max_length=30,
            required=True,
            widget=forms.TextInput(attrs={'class': 'inm'})),
        'last_name': forms.CharField(
            label=_(u"Nom"),
            max_length=30,
            required=True,
            widget=forms.TextInput(attrs={'class': 'inm'})),
        'addresses__address1': forms.CharField(
            label=_(u"Rue"),
            max_length=255,
            widget=forms.Textarea(attrs={'class': 'inm street'})),
        'addresses__zipcode': forms.CharField(
            label=_(u"Code postal"),
            required=True,
            max_length=9,
            widget=forms.TextInput(attrs={'class': 'inm zipcode'})),
        'addresses__city': forms.CharField(
            label=_(u"Ville"),
            required=True,
            max_length=255,
            widget=forms.TextInput(attrs={'class': 'inm town'})),
        'addresses__country': forms.ChoiceField(
            label=_(u"Pays"),
            choices=COUNTRY_CHOICES,
            required=True,
            widget=forms.Select(attrs={'class': 'selm'})),
        'avatar': forms.ImageField(required=False, label=_(u"Photo de profil")),
        'phones__phone': PhoneNumberField(
            label=_(u"Téléphone"),
            required=True,
            widget=forms.TextInput(attrs={'class': 'inm'})),
        'drivers_license_number': forms.CharField(
            label=_(u'Numéro de permis'), max_length=32),
        'drivers_license_date': DateSelectField(label=_(u'Date de délivraisance')),
        'date_of_birth': DateSelectField(label=_(u'Date de naissance')),
        'place_of_birth': forms.CharField(
            label=_(u'Lieu de naissance'), max_length=255),
        'cvv': forms.CharField(
            max_length=4,
            label=_(u'Cryptogramme de sécurité'),
            help_text=_(u'Les 3 derniers chiffres au dos de la carte.')),
        'expires': ExpirationField(label=_(u'Date d\'expiration')),
        'holder_name': forms.CharField(label=_(u'Titulaire de la carte')),
        'card_number': CreditCardField(label=_(u'Numéro de carte de crédit')),
        'godfather_email': forms.EmailField(
            label=_(u'Email de votre parrain'),
            required=False,
            help_text=_(u'Commissions offertes pendant 3 mois si vous êtes parrainé par membre e-loue. Offre valable entre le 18 avril et le 30 avril 2013.')),
    })
    # Are we in presence of a pro?  (``has_key`` replaced by ``in`` — same
    # behavior, works on Python 2 and 3.)
    if 'is_professional' in fields:
        if instance and getattr(instance, 'is_professional', None) is not None:
            del fields['is_professional']
            del fields['company_name']
    # Do we already have an address?  Offer a picker and make the free-form
    # address fields optional.
    if instance and instance.addresses.exists():
        fields['addresses'] = forms.ModelChoiceField(
            label=_(u"Adresse"),
            required=False,
            queryset=instance.addresses.all(),
            initial=instance.default_address if instance.default_address else instance.addresses.all()[0],
            widget=forms.Select(attrs={'class': 'selm'}),
            help_text=_(u"Selectionnez une adresse enregistrée précédemment"))
        for f in fields.keys():
            if "addresses" in f:
                fields[f].required = False
    # Do we already have a phone number?
    if instance and instance.phones.exists():
        fields['phones'] = forms.ModelChoiceField(
            label=_(u"Téléphone"),
            required=False,
            queryset=instance.phones.all(),
            initial=instance.phones.all()[0],
            widget=forms.Select(attrs={'class': 'selm'}),
            help_text=_(u"Selectionnez un numéro de téléphone enregistré précédemment"))
        if 'phones__phone' in fields:
            fields['phones__phone'].required = False
    # Do we already have a password?
    if 'password1' in fields:
        if instance and getattr(instance, 'password', None):
            del fields['password1']
            del fields['password2']
    if instance and instance.username and "first_name" not in required_fields:
        del fields['avatar']
    if instance:
        try:
            if instance.creditcard:
                del fields['cvv']
                del fields['expires']
                del fields['holder_name']
                del fields['card_number']
        except CreditCard.DoesNotExist:
            pass
    # Final pruning pass.  Fix: snapshot the keys with list() so deleting
    # entries during iteration is safe on any Python version (on Python 2
    # this was only accidentally safe because keys() returned a list).
    for f in list(fields.keys()):
        if required_fields and f not in required_fields:
            del fields[f]
            continue
        if "__" in f or f in ["addresses", "phones", "password"]:
            continue
        if hasattr(instance, f) and getattr(instance, f):
            del fields[f]

    def save(self):
        """Persist the cleaned data onto the instance; create the address,
        phone and credit card records as needed."""
        for attr, value in self.cleaned_data.iteritems():
            if attr == "password1":
                self.instance.set_password(value)
            # Relation pseudo-fields (addresses__*, phones__*) are handled
            # below; everything else maps straight onto instance attributes.
            if "addresses" not in attr and "phones" not in attr:
                setattr(self.instance, attr, value)
        if 'addresses' in self.cleaned_data and self.cleaned_data['addresses']:
            address = self.cleaned_data['addresses']
        elif 'addresses__address1' in self.cleaned_data:
            address = self.instance.addresses.create(
                address1=self.cleaned_data['addresses__address1'],
                zipcode=self.cleaned_data['addresses__zipcode'],
                city=self.cleaned_data['addresses__city'],
                country=self.cleaned_data['addresses__country'])
            self.instance.default_address = address
        else:
            address = None
        if 'phones' in self.cleaned_data and self.cleaned_data['phones']:
            phone = self.cleaned_data['phones']
        elif 'phones__phone' in self.cleaned_data:
            phone = self.instance.phones.create(
                number=self.cleaned_data['phones__phone'])
        else:
            phone = None
        if self.cleaned_data.get('card_number'):
            # Tokenize the card with Paybox; only the subscription token is
            # stored as card_number.  masked_number was set in clean().
            pm = PayboxManager()
            subscriber_reference = uuid.uuid4().hex
            self.cleaned_data['card_number'] = pm.subscribe(
                subscriber_reference, self.cleaned_data['card_number'],
                self.cleaned_data['expires'], self.cleaned_data['cvv'])
            credit_card = CreditCard.objects.create(
                subscriber_reference=subscriber_reference,
                masked_number=self.cleaned_data['masked_number'],
                card_number=self.cleaned_data['card_number'],
                holder_name=self.cleaned_data['holder_name'],
                expires=self.cleaned_data['expires'],
                holder=self.instance, keep=True)
        else:
            credit_card = None
        self.instance.save()
        return self.instance, address, phone, credit_card

    def clean_username(self):
        """Reject usernames whose name or slug is already taken."""
        if Patron.objects.filter(
                username=self.cleaned_data['username']).exists():
            raise forms.ValidationError(
                _(u"Ce nom d'utilisateur est déjà pris."))
        if Patron.objects.filter(
                slug=slugify(self.cleaned_data['username'])).exists():
            raise forms.ValidationError(
                _(u"Ce nom d'utilisateur est déjà pris."))
        return self.cleaned_data['username']

    def clean_addresses(self):
        """Require either a picked address or a complete free-form one."""
        addresses = self.cleaned_data['addresses']
        address1 = self.cleaned_data['addresses__address1']
        zipcode = self.cleaned_data['addresses__zipcode']
        city = self.cleaned_data['addresses__city']
        country = self.cleaned_data['addresses__country']
        if not addresses and not (address1 and zipcode and city and country):
            raise forms.ValidationError(_(u"Vous devez spécifiez une adresse"))
        return self.cleaned_data['addresses']

    def clean_company_name(self):
        """Professionals must provide a company name."""
        is_professional = self.cleaned_data.get('is_professional')
        company_name = self.cleaned_data.get('company_name', None)
        if is_professional and not company_name:
            raise forms.ValidationError(
                _(u"Vous devez entrer le nom de votre société"))
        return company_name

    def clean_phones(self):
        """Require either a picked phone or a typed-in number."""
        phones = self.cleaned_data['phones']
        phone = self.cleaned_data['phones__phone']
        if not phones and not phone:
            raise forms.ValidationError(
                _(u"Vous devez spécifiez un numéro de téléphone"))
        return phones

    def clean(self):
        """Cross-field validation: card pre-authorization and password match."""
        if self.errors:
            return self.cleaned_data
        if self.cleaned_data.get('card_number'):
            try:
                pm = PayboxManager()
                self.cleaned_data['masked_number'] = mask_card_number(
                    self.cleaned_data['card_number'])
                # 1-unit authorization only verifies the card is valid.
                pm.authorize(self.cleaned_data['card_number'],
                             self.cleaned_data['expires'],
                             self.cleaned_data['cvv'], 1, 'verification')
            except PayboxException:
                raise forms.ValidationError(
                    _(u'La validation de votre carte a échoué.'))
        # testing passwords against each other:
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 != password2:
            msg = _(u"Vos mots de passe ne correspondent pas")
            self._errors['password1'] = [msg]
            self._errors['password2'] = [msg]
            del self.cleaned_data['password1']
            del self.cleaned_data['password2']
        return self.cleaned_data

    class Meta:
        # BetterForm fieldset layout for the rendered page.
        fieldsets = [
            ('member', {
                'fields': [
                    'is_professional', 'company_name', 'username',
                    'password1', 'password2', 'first_name', 'last_name',
                    'avatar', 'godfather_email', 'date_of_birth',
                    'place_of_birth'
                ],
                'legend': 'Vous'
            }),
            ('driver_info', {
                'fields': ['drivers_license_number', 'drivers_license_date'],
                'legend': _(u'Permis de conduire')
            }),
            ('contacts', {
                'fields': [
                    'addresses', 'addresses__address1', 'addresses__zipcode',
                    'addresses__city', 'addresses__country', 'phones',
                    'phones__phone'
                ],
                'legend': 'Vos coordonnées'
            }),
            ('payment', {
                'fields': [
                    'cvv', 'expires', 'holder_name', 'card_number',
                ],
                'legend': 'Vos coordonnées bancaires'
            }),
        ]

    # Assemble the form class dynamically and bind the methods above
    # (Python 2 unbound-method binding via 3-arg types.MethodType).
    class_dict = fields.copy()
    class_dict.update({'instance': instance, 'Meta': Meta})
    form_class = type('MissingInformationForm', (BetterForm, ), class_dict)
    form_class.save = types.MethodType(save, None, form_class)
    form_class.clean = types.MethodType(clean, None, form_class)
    form_class.clean_username = types.MethodType(clean_username, None, form_class)
    form_class.clean_phones = types.MethodType(clean_phones, None, form_class)
    form_class.clean_addresses = types.MethodType(clean_addresses, None, form_class)
    form_class.clean_company_name = types.MethodType(clean_company_name, None, form_class)
    return fields != {}, form_class
class Chart(object):
    """Builder for a Google Chart API image URL, with optional hidden
    datasets (included in the encoded data but not drawn as series)."""

    BASE = "http://chart.apis.google.com/chart"
    # Default chart size and type ("lc" = line chart), applied in url()
    # only where the caller has not set an explicit option.
    defaults = {
        "chs": "200x200",
        "cht": "lc"
    }

    def __init__(self):
        # Use a SortedDict for the options so they are added in a
        # deterministic manner; this eases things like dealing with cache keys
        # or writing unit tests.
        self.options = SortedDict()
        self.datasets = []
        self.hidden_datasets = []
        self.axes = []
        self.datarange = None
        self.alt = None

    def clone(self):
        """Return a shallow copy (options, datasets and axes are copied;
        the contained objects are shared)."""
        clone = self.__class__()
        clone.options = self.options.copy()
        clone.datasets = self.datasets[:]
        clone.hidden_datasets = self.hidden_datasets[:]
        clone.axes = self.axes[:]
        return clone

    def img(self, color_override=None):
        """Return a safe <img> tag for this chart.  When *color_override*
        is given, that colour is highlighted and all other series colours
        are replaced with the inactive colour."""
        orig_colors = self.options.get('chco')
        # If color_override is set, replace the chco option with this color
        if color_override is not None:
            final_color = []
            for c in self.options['chco'].split(','):
                if c == color_override:
                    c = orig_colors.split(',')[0]
                else:
                    c = _chart_inactive_color
                final_color.append(c)
            self.options['chco'] = ','.join(final_color)
        url = self.url()
        # Restore the original colours after building the URL.
        if orig_colors:
            self.options['chco'] = orig_colors
        width, height = self.options["chs"].split("x")
        if self.alt:
            alt = '%s' % escape(self.alt)
        else:
            alt = ''
        s = mark_safe('<img src="%s" width="%s" height="%s" alt="%s" />'
                      % (escape(url), width, height, alt))
        return s

    def url(self):
        """Assemble and return the full chart URL (data encoding, defaults
        and axis options included)."""
        if self.options.get('cht', None) == 't':
            self.datasets.append(self.options.pop("_mapdata"))
        # Figure out the chart's data range (over visible + hidden data).
        if not self.datarange:
            maxvalue = max(max(d) for d in chain(self.datasets, self.hidden_datasets) if d)
            minvalue = min(min(d) for d in chain(self.datasets, self.hidden_datasets) if d)
            self.datarange = (minvalue, maxvalue)
        # Encode data
        if "chds" in self.options or self.options.get('cht', None) == 'gom':
            # text encoding if scaling provided, or for google-o-meter type;
            # the "t%d:" prefix carries the visible-dataset count.
            data = "|".join(encode_text(d) for d in chain(self.datasets, self.hidden_datasets))
            encoded_data = "t%d:%s" % (len(self.datasets), data)
        else:
            # extended encoding otherwise
            data = extended_separator.join(encode_extended(d, self.datarange)
                                           for d in chain(self.datasets, self.hidden_datasets))
            encoded_data = "e%d:%s" % (len(self.datasets), data)
        # Update defaults
        for k in self.defaults:
            if k not in self.options:
                self.options[k] = self.defaults[k]
        # Start to calculate the URL
        url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options), encoded_data)
        # Calculate axis options
        if self.axes:
            axis_options = SortedDict()
            axis_sides = []
            for i, axis in enumerate(self.axes):
                axis_sides.append(axis.side)
                for opt in axis.options:
                    try:
                        # Options are %-templates taking the axis index;
                        # non-template options raise TypeError and are skipped.
                        axis_options.setdefault(opt, []).append(axis.options[opt] % i)
                    except TypeError:
                        pass
            # Turn the option lists into strings
            axis_sides = smart_join(",", *axis_sides)
            for opt in axis_options:
                axis_options[opt] = smart_join("|", *axis_options[opt])
            url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
        return url

    def charts(self):
        """Return one dict per entry of the final colour map, each holding a
        1-based id, the colour, its label and a highlighted <img> tag."""
        res = []
        count = 1
        for o in self.options['_final_color_map'].items():
            res.append({
                'id': count,
                'color': o[0],
                'label': o[1],
                'img': self.img(color_override=o[0])
            })
            count += 1
        return res
# Contains historic platforms that are no longer supported. # These exist so that legacy files can still be edited. PLATFORMS = {PLATFORM_ANY.id: PLATFORM_ANY, PLATFORM_ALL.id: PLATFORM_ALL, PLATFORM_LINUX.id: PLATFORM_LINUX, PLATFORM_MAC.id: PLATFORM_MAC, PLATFORM_BSD.id: PLATFORM_BSD, PLATFORM_WIN.id: PLATFORM_WIN, PLATFORM_SUN.id: PLATFORM_SUN, PLATFORM_ANDROID.id: PLATFORM_ANDROID} MOBILE_PLATFORMS = SortedDict([(PLATFORM_ANDROID.id, PLATFORM_ANDROID)]) DESKTOP_PLATFORMS = SortedDict([(PLATFORM_ALL.id, PLATFORM_ALL), (PLATFORM_LINUX.id, PLATFORM_LINUX), (PLATFORM_MAC.id, PLATFORM_MAC), (PLATFORM_WIN.id, PLATFORM_WIN)]) SUPPORTED_PLATFORMS = DESKTOP_PLATFORMS.copy() SUPPORTED_PLATFORMS.update(MOBILE_PLATFORMS) DESKTOP_PLATFORMS_CHOICES = tuple( (p.id, p.name) for p in DESKTOP_PLATFORMS.values() ) SUPPORTED_PLATFORMS_CHOICES = tuple( (p.id, p.name) for p in SUPPORTED_PLATFORMS.values() ) PLATFORM_DICT = { 'all': PLATFORM_ALL, 'linux': PLATFORM_LINUX, 'mac': PLATFORM_MAC, 'macosx': PLATFORM_MAC, 'darwin': PLATFORM_MAC, 'bsd': PLATFORM_BSD,
class SortedDictTests(SimpleTestCase):
    """Exercise SortedDict's insertion-order guarantees and dict API
    (Python 2 era: keys()/values()/items() return lists)."""

    def setUp(self):
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'
        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'

    def test_basic_methods(self):
        # keys/values/items must reflect insertion order, not key order.
        self.assertEqual(self.d1.keys(), [7, 1, 9])
        self.assertEqual(self.d1.values(), ['seven', 'one', 'nine'])
        self.assertEqual(self.d1.items(), [(7, 'seven'), (1, 'one'), (9, 'nine')])

    def test_overwrite_ordering(self):
        """ Overwriting an item keeps its place. """
        self.d1[1] = 'ONE'
        self.assertEqual(self.d1.values(), ['seven', 'ONE', 'nine'])

    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(self.d1.keys(), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(self.d2.keys(), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_change_keys(self):
        """
        Changing the keys won't do anything, it's only a copy of the
        keys dict.
        """
        k = self.d2.keys()
        k.remove(9)
        self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.

        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)
        self.assertEqual(d.keys(), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(real_dict.values()), ['one', 'second-two'])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(d.values(), ['second-two', 'one'])

    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        # copy() must preserve the key ordering.
        self.assertEqual(self.d1.keys(), self.d1.copy().keys())

    def test_append(self):
        self.d1[13] = 'thirteen'
        self.assertEqual(
            repr(self.d1),
            "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
        )

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        # Equality against a plain dict ignores ordering.
        d = SortedDict((i, i) for i in xrange(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        # Round-tripping through pickle (protocol 2) preserves contents.
        self.assertEqual(
            pickle.loads(pickle.dumps(self.d1, 2)),
            {7: 'seven', 1: 'one', 9: 'nine'}
        )

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        # The internal ordering list must be emptied as well.
        self.assertEqual(self.d1.keyOrder, [])
def map_sum(request):
    """Totalise services, agreements, equipped agreements and diagnostics
    per region and per state for the filtered Casas, rendering the result
    as a landscape-A4 PDF."""
    # Filter the Casas according to the request parameters.
    param = get_params(request)
    casas = filtrar_casas(**param)
    # Build the zeroed totalisation records (one counter per service type,
    # one per project).
    tot_servicos = SortedDict()
    tot_projetos = SortedDict()
    tot_diagnosticos = SortedDict()
    for ts in TipoServico.objects.all():
        tot_servicos[ts.sigla] = 0
    for pr in Projeto.objects.all():
        tot_projetos[pr.sigla] = 0
    # Independent copies so the three project-keyed totals don't alias.
    tot_convenios = tot_projetos.copy()
    tot_equipadas = tot_projetos.copy()
    tot_diagnosticos['A'] = 0  # unpublished diagnostics (see counting loop)
    tot_diagnosticos['P'] = 0  # published diagnostics
    # Build result rows: region -> { its states (UFs) -> counters }.
    result = {}
    for uf in UnidadeFederativa.objects.filter(
            Q(regiao__in=param['regioes']) |
            Q(sigla__in=param['estados'])).order_by('regiao', 'nome'):
        if uf.regiao not in result:
            result[uf.regiao] = {
                'nome': uf.get_regiao_display(),
                'ufs': {},
                'servicos': tot_servicos.copy(),
                'convenios': tot_projetos.copy(),
                'equipadas': tot_projetos.copy(),
                'diagnosticos': tot_diagnosticos.copy()
            }
        result[uf.regiao]['ufs'][uf.codigo_ibge] = {
            'nome': uf.nome,
            'servicos': tot_servicos.copy(),
            'convenios': tot_projetos.copy(),
            'equipadas': tot_projetos.copy(),
            'diagnosticos': tot_diagnosticos.copy()
        }
    # Process the filtered Casas; every hit increments the global, the
    # regional and the per-state counter together.
    for casa in casas.distinct():
        uf = casa.municipio.uf
        for s in casa.servico_set.all():
            tot_servicos[s.tipo_servico.sigla] += 1
            result[uf.regiao]['servicos'][s.tipo_servico.sigla] += 1
            result[uf.regiao]['ufs'][uf.codigo_ibge]['servicos'][
                s.tipo_servico.sigla] += 1
        for c in casa.convenio_set.all():
            tot_convenios[c.projeto.sigla] += 1
            result[uf.regiao]['convenios'][c.projeto.sigla] += 1
            result[uf.regiao]['ufs'][uf.codigo_ibge]['convenios'][
                c.projeto.sigla] += 1
            # "Equipada" requires a signed acceptance term.
            if (c.equipada and c.data_termo_aceite is not None):
                tot_equipadas[c.projeto.sigla] += 1
                result[uf.regiao]['equipadas'][c.projeto.sigla] += 1
                result[uf.regiao]['ufs'][uf.codigo_ibge]['equipadas'][
                    c.projeto.sigla] += 1
        for d in casa.diagnostico_set.all():
            if d.publicado:
                tot_diagnosticos['P'] += 1
                result[uf.regiao]['diagnosticos']['P'] += 1
                result[uf.regiao]['ufs'][
                    uf.codigo_ibge]['diagnosticos']['P'] += 1
            else:
                tot_diagnosticos['A'] += 1
                result[uf.regiao]['diagnosticos']['A'] += 1
                result[uf.regiao]['ufs'][
                    uf.codigo_ibge]['diagnosticos']['A'] += 1
    extra_context = {
        'pagesize': 'a4 landscape',
        'servicos': TipoServico.objects.all(),
        'projetos': Projeto.objects.all(),
        'result': result,
        'tot_servicos': tot_servicos,
        'tot_convenios': tot_convenios,
        'tot_equipadas': tot_equipadas,
        'tot_diagnosticos': tot_diagnosticos,
    }
    return render_to_pdf('metas/map_sum.html', extra_context)
' AND NOT engdoc.is_archived ' 'ORDER BY engvisits.visits DESC ' + self._limit_clause(max)) return query, params def _format_row(self, row): return _format_row_with_out_of_dateness(self.locale, *row) # L10n Dashboard tables that have their own whole-page views: L10N_READOUTS = SortedDict((t.slug, t) for t in [ MostVisitedTranslationsReadout, TemplateTranslationsReadout, UnreviewedReadout ]) # Contributors ones: CONTRIBUTOR_READOUTS = SortedDict((t.slug, t) for t in [ MostVisitedDefaultLanguageReadout, TemplateReadout, HowToContributeReadout, AdministrationReadout, UnreviewedReadout, NeedsChangesReadout, UnreadyForLocalizationReadout, UnhelpfulReadout ]) # All: READOUTS = L10N_READOUTS.copy() READOUTS.update(CONTRIBUTOR_READOUTS) GROUP_L10N_READOUTS = SortedDict( (t.slug, t) for t in [MostVisitedTranslationsReadout, UnreviewedReadout]) # English group locale is the same as l10n dashboard. GROUP_CONTRIBUTOR_READOUTS = CONTRIBUTOR_READOUTS
class SortedDictTests(SimpleTestCase):
    """Exercise SortedDict's insertion-order guarantees and dict API,
    using six iterator helpers so the assertions run on Python 2 and 3."""

    def setUp(self):
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'
        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'

    def test_basic_methods(self):
        # Iteration must follow insertion order, not key order.
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'one', 'nine'])
        self.assertEqual(list(six.iteritems(self.d1)),
                         [(7, 'seven'), (1, 'one'), (9, 'nine')])

    def test_overwrite_ordering(self):
        """ Overwriting an item keeps its place. """
        self.d1[1] = 'ONE'
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'ONE', 'nine'])

    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])

    if six.PY2:
        def test_change_keys(self):
            """
            Changing the keys won't do anything, it's only a copy of the
            keys dict.

            This test doesn't make sense under Python 3 because keys is an
            iterator.
            """
            k = self.d2.keys()
            k.remove(9)
            self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.

        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)
        self.assertEqual(list(six.iterkeys(d)), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(six.itervalues(real_dict)), ['one', 'second-two'])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(list(six.itervalues(d)), ['second-two', 'one'])

    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        # copy() must preserve the key ordering.
        self.assertEqual(list(six.iterkeys(self.d1)),
                         list(six.iterkeys(self.d1.copy())))

    def test_append(self):
        self.d1[13] = 'thirteen'
        self.assertEqual(
            repr(self.d1),
            "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}"
        )

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        # Equality against a plain dict ignores ordering.
        d = SortedDict((i, i) for i in range(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        # Round-tripping through pickle (protocol 2) preserves contents.
        self.assertEqual(
            pickle.loads(pickle.dumps(self.d1, 2)),
            {7: 'seven', 1: 'one', 9: 'nine'}
        )

    def test_copy(self):
        # copy.copy() must also preserve insertion order.
        orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        copied = copy.copy(orig)
        self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
        self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        # The internal ordering list must be emptied as well.
        self.assertEqual(self.d1.keyOrder, [])

    def test_reversed(self):
        self.assertEqual(list(self.d1), [7, 1, 9])
        self.assertEqual(list(self.d2), [1, 9, 0, 7])
        self.assertEqual(list(reversed(self.d1)), [9, 1, 7])
        self.assertEqual(list(reversed(self.d2)), [7, 0, 9, 1])

    def test_insert(self):
        # insert() is deprecated; it must emit a DeprecationWarning.
        d = SortedDict()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            d.insert(0, "hello", "world")
        assert w[0].category is DeprecationWarning

    def test_value_for_index(self):
        # value_for_index() is deprecated; it must still work but warn.
        d = SortedDict({"a": 3})
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.assertEqual(d.value_for_index(0), 3)
        assert w[0].category is DeprecationWarning
for more information. """ MEDIA_TREE_ICON_FINDERS = getattr(settings, 'MEDIA_TREE_ICON_FINDERS', ( 'media_tree.utils.staticfiles.MimetypeStaticIconFileFinder', )) MEDIA_TREE_ADMIN_THUMBNAIL_SIZES = SortedDict(( ('tiny', (20, 20)), ('small', (70, 70)), ('default', (100, 100)), ('medium', (250, 250)), ('large', (400, 400)), )) MEDIA_TREE_THUMBNAIL_SIZES = MEDIA_TREE_ADMIN_THUMBNAIL_SIZES.copy() MEDIA_TREE_THUMBNAIL_SIZES.update({ 'full': None, # None means: use original size }) MEDIA_TREE_THUMBNAIL_SIZES.update(getattr(settings, 'MEDIA_TREE_THUMBNAIL_SIZES', {})) """ A dictionary of default thumbnail sizes. You can pass the dictionary key to the ``thumbnail`` templatetag instead of a numeric size. Default:: { 'small': (80, 80), 'default': (100, 100), 'medium': (250, 250),
class SortedDictTests(IgnoreDeprecationWarningsMixin, SimpleTestCase):
    """Exercise SortedDict's insertion-order guarantees and dict API;
    deprecation warnings are silenced by the mixin."""

    def setUp(self):
        super(SortedDictTests, self).setUp()
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'
        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'

    def test_basic_methods(self):
        # Iteration must follow insertion order, not key order.
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'one', 'nine'])
        self.assertEqual(list(six.iteritems(self.d1)),
                         [(7, 'seven'), (1, 'one'), (9, 'nine')])

    def test_overwrite_ordering(self):
        """ Overwriting an item keeps its place. """
        self.d1[1] = 'ONE'
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'ONE', 'nine'])

    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])

    if six.PY2:
        def test_change_keys(self):
            """
            Changing the keys won't do anything, it's only a copy of the
            keys dict.

            This test doesn't make sense under Python 3 because keys is an
            iterator.
            """
            k = self.d2.keys()
            k.remove(9)
            self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.

        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)
        self.assertEqual(list(six.iterkeys(d)), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(six.itervalues(real_dict)), ['one', 'second-two'])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(list(six.itervalues(d)), ['second-two', 'one'])

    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        # copy() must preserve the key ordering.
        self.assertEqual(list(six.iterkeys(self.d1)),
                         list(six.iterkeys(self.d1.copy())))

    def test_append(self):
        self.d1[13] = 'thirteen'
        self.assertEqual(repr(self.d1),
                         "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}")

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        # Equality against a plain dict ignores ordering.
        d = SortedDict((i, i) for i in range(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        # Round-tripping through pickle (protocol 2) preserves contents.
        self.assertEqual(pickle.loads(pickle.dumps(self.d1, 2)), {
            7: 'seven',
            1: 'one',
            9: 'nine'
        })

    def test_copy(self):
        # copy.copy() must also preserve insertion order.
        orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        copied = copy.copy(orig)
        self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
        self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        # The internal ordering list must be emptied as well.
        self.assertEqual(self.d1.keyOrder, [])

    def test_reversed(self):
        self.assertEqual(list(self.d1), [7, 1, 9])
        self.assertEqual(list(self.d2), [1, 9, 0, 7])
        self.assertEqual(list(reversed(self.d1)), [9, 1, 7])
        self.assertEqual(list(reversed(self.d2)), [7, 0, 9, 1])
class Chart(object):
    """Builder for a Google Chart API image URL (older variant: no hidden
    datasets; optional grid-lines flag affects the text-encoding prefix)."""

    BASE = "http://chart.apis.google.com/chart"
    # Default chart size and type ("lc" = line chart), applied in url()
    # only where the caller has not set an explicit option.
    defaults = {
        "chs": "200x200",
        "cht": "lc"
    }

    def __init__(self):
        # Use a SortedDict for the options so they are added in a
        # deterministic manner; this eases things like dealing with cache keys
        # or writing unit tests.
        self.options = SortedDict()
        self.datasets = []
        self.axes = []
        self.datarange = None
        self.alt = None
        self.grid_lines = False

    def clone(self):
        """Return a shallow copy (options, datasets and axes are copied;
        the contained objects are shared)."""
        clone = self.__class__()
        clone.options = self.options.copy()
        clone.datasets = self.datasets[:]
        clone.axes = self.axes[:]
        return clone

    def img(self):
        """Return a safe <img> tag for this chart."""
        url = self.url()
        width, height = self.options["chs"].split("x")
        if self.alt:
            alt = 'alt="%s" ' % escape(self.alt)
        else:
            alt = ''
        s = mark_safe('<img src="%s" width="%s" height="%s" %s/>'
                      % (escape(url), width, height, alt))
        return s

    def url(self):
        """Assemble and return the full chart URL (data encoding, defaults
        and axis options included)."""
        if self.options.get('cht', None) == 't':
            self.datasets.append(self.options.pop("_mapdata"))
        # Figure out the chart's data range
        if not self.datarange:
            maxvalue = max(max(d) for d in self.datasets if d)
            minvalue = min(min(d) for d in self.datasets if d)
            self.datarange = (minvalue, maxvalue)
        # Encode data
        if "chds" in self.options or self.options.get('cht', None) == 'gom':
            # text encoding if scaling provided, or for google-o-meter type
            data = "|".join(encode_text(d) for d in self.datasets)
            if self.grid_lines:
                encoded_data = "t1:%s" % data
            else:
                encoded_data = "t:%s" % data
        else:
            # extended encoding otherwise
            data = extended_separator.join(encode_extended(d, self.datarange)
                                           for d in self.datasets)
            encoded_data = "e:%s" % data
        # Update defaults
        for k in self.defaults:
            if k not in self.options:
                self.options[k] = self.defaults[k]
        # Start to calculate the URL
        url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options), encoded_data)
        # Calculate axis options
        if self.axes:
            axis_options = SortedDict()
            axis_sides = []
            for i, axis in enumerate(self.axes):
                axis_sides.append(axis.side)
                for opt in axis.options:
                    # Options are %-templates taking the axis index.
                    axis_options.setdefault(opt, []).append(axis.options[opt] % i)
            # Turn the option lists into strings
            axis_sides = smart_join(",", *axis_sides)
            for opt in axis_options:
                axis_options[opt] = smart_join("|", *axis_options[opt])
            url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
        return url
class Chart(object):
    """A Google Chart API chart supporting hidden datasets and per-color
    highlighting: accumulates options, datasets and axes, and renders
    itself as a chart URL or a safe ``<img>`` tag.
    """
    BASE = "http://chart.apis.google.com/chart"
    defaults = {"chs": "200x200", "cht": "lc"}

    def __init__(self):
        # Use a SortedDict for the options so they are added in a
        # deterministic manner; this eases things like dealing with cache keys
        # or writing unit tests.
        self.options = SortedDict()
        self.datasets = []
        # Datasets that participate in range calculation and encoding but
        # are counted separately from the visible ones (see url()).
        self.hidden_datasets = []
        self.axes = []
        self.datarange = None
        self.alt = None

    def clone(self):
        """Return a copy of this chart whose options, datasets (visible and
        hidden) and axes can be mutated independently of the original.
        """
        clone = self.__class__()
        clone.options = self.options.copy()
        clone.datasets = self.datasets[:]
        clone.hidden_datasets = self.hidden_datasets[:]
        clone.axes = self.axes[:]
        return clone

    def img(self, color_override=None):
        """Return a safe ``<img>`` tag for this chart.

        If ``color_override`` is given, every color in the "chco" option
        except the matching one is replaced with ``_chart_inactive_color``
        (the matching one becomes the first original color), the URL is
        built, and then the original "chco" value is restored.
        """
        orig_colors = self.options.get('chco')
        # If color_override is set, replace the chco option with this color
        # NOTE(review): assumes "chco" is present when color_override is
        # given; self.options['chco'] would raise KeyError otherwise --
        # confirm callers (e.g. charts()) guarantee this.
        if color_override is not None:
            final_color = []
            for c in self.options['chco'].split(','):
                if c == color_override:
                    c = orig_colors.split(',')[0]
                else:
                    c = _chart_inactive_color
                final_color.append(c)
            self.options['chco'] = ','.join(final_color)
        url = self.url()
        # Restore the caller's colors after building the URL.
        if orig_colors:
            self.options['chco'] = orig_colors
        width, height = self.options["chs"].split("x")
        if self.alt:
            alt = '%s' % escape(self.alt)
        else:
            alt = ''
        s = mark_safe('<img src="%s" width="%s" height="%s" alt="%s" />' %
                      (escape(url), width, height, alt))
        return s

    def url(self):
        """Build and return the full Google Chart API URL for this chart.

        Side effects: for map charts (cht == "t") pops "_mapdata" from the
        options into the datasets, caches the computed datarange, and fills
        in any missing default options.
        """
        if self.options.get('cht', None) == 't':
            self.datasets.append(self.options.pop("_mapdata"))
        # Figure out the chart's data range
        # Hidden datasets take part in the min/max so visible series are
        # scaled consistently with them; empty datasets are skipped.
        if not self.datarange:
            maxvalue = max(
                max(d) for d in chain(self.datasets, self.hidden_datasets)
                if d)
            minvalue = min(
                min(d) for d in chain(self.datasets, self.hidden_datasets)
                if d)
            self.datarange = (minvalue, maxvalue)
        # Encode data
        # The number after the "t"/"e" prefix is the count of visible
        # datasets; presumably downstream code uses it to tell visible
        # series from hidden ones -- confirm against the consumer.
        if "chds" in self.options or self.options.get('cht', None) == 'gom':
            # text encoding if scaling provided, or for google-o-meter type
            data = "|".join(
                encode_text(d)
                for d in chain(self.datasets, self.hidden_datasets))
            encoded_data = "t%d:%s" % (len(self.datasets), data)
        else:
            # extended encoding otherwise
            data = extended_separator.join(
                encode_extended(d, self.datarange)
                for d in chain(self.datasets, self.hidden_datasets))
            encoded_data = "e%d:%s" % (len(self.datasets), data)
        # Update defaults
        for k in self.defaults:
            if k not in self.options:
                self.options[k] = self.defaults[k]
        # Start to calculate the URL
        url = "%s?%s&chd=%s" % (self.BASE, urlencode(
            self.options), encoded_data)
        # Calculate axis options
        if self.axes:
            axis_options = SortedDict()
            axis_sides = []
            for i, axis in enumerate(self.axes):
                axis_sides.append(axis.side)
                for opt in axis.options:
                    # Each axis option value is %-formatted with the axis
                    # index; values without a format placeholder raise
                    # TypeError and are deliberately skipped (best-effort).
                    try:
                        axis_options.setdefault(opt, []).append(
                            axis.options[opt] % i)
                    except TypeError:
                        pass
            # Turn the option lists into strings
            axis_sides = smart_join(",", *axis_sides)
            for opt in axis_options:
                axis_options[opt] = smart_join("|", *axis_options[opt])
            url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
        return url

    def charts(self):
        """Return a list of dicts (id, color, label, img), one per entry in
        the "_final_color_map" option, each rendered with that entry's
        color highlighted via img(color_override=...).
        """
        res = []
        count = 1
        for o in self.options['_final_color_map'].items():
            res.append({
                'id': count,
                'color': o[0],
                'label': o[1],
                'img': self.img(color_override=o[0])
            })
            count += 1
        return res
for more information. """ MEDIA_TREE_ICON_FINDERS = getattr(settings, 'MEDIA_TREE_ICON_FINDERS', ( 'media_tree.utils.staticfiles.MimetypeStaticIconFileFinder', )) MEDIA_TREE_ADMIN_THUMBNAIL_SIZES = SortedDict(( ('tiny', (20, 20)), ('small', (70, 70)), ('default', (100, 100)), ('medium', (250, 250)), ('large', (400, 400)), )) MEDIA_TREE_THUMBNAIL_SIZES = MEDIA_TREE_ADMIN_THUMBNAIL_SIZES.copy() MEDIA_TREE_THUMBNAIL_SIZES.update({ 'full': None, # None means: use original size }) MEDIA_TREE_THUMBNAIL_SIZES.update(getattr(settings, 'MEDIA_TREE_THUMBNAIL_SIZES', {})) """ A dictionary of default thumbnail sizes. You can pass the dictionary key to the ``thumbnail`` templatetag instead of a numeric size. Default:: { 'small': (80, 80), 'default': (100, 100), 'medium': (250, 250),