def get_value(self, context, image_list):
    """Group images by their ``group`` attribute, preserving encounter order.

    Images with a falsy ``group`` are dropped; ``context`` is unused here.
    Returns a SortedDict mapping group name -> list of images.
    """
    grouped = SortedDict()
    for img in image_list:
        key = img.group
        if key:
            grouped.setdefault(key, []).append(img)
    return grouped
def get_fields(self):
    """
    Returns the complete set of fields for the object as a dict.

    This will be the set of any explicitly declared fields,
    plus the set of fields returned by get_default_fields().
    """
    ret = SortedDict()

    # Get the explicitly declared fields.  Deep-copy so per-instance state
    # written by initialize() never leaks back into the shared class-level
    # base_fields.
    base_fields = copy.deepcopy(self.base_fields)
    for key, field in base_fields.items():
        ret[key] = field
        # Set up the field
        field.initialize(parent=self, field_name=key)

    # Add in the default fields; explicitly declared fields win on clashes.
    default_fields = self.get_default_fields()
    for key, val in default_fields.items():
        if key not in ret:
            ret[key] = val

    # If 'fields' is specified, use those fields, in that order.
    # NOTE(review): a name listed in opts.fields but absent from ret raises
    # KeyError here.
    if self.opts.fields:
        new = SortedDict()
        for key in self.opts.fields:
            new[key] = ret[key]
        ret = new

    # Remove anything in 'exclude'; pop with default so unknown names are
    # ignored silently.
    if self.opts.exclude:
        for key in self.opts.exclude:
            ret.pop(key, None)

    return ret
def get_item(self, row):
    """
    Given a header and a row return a sorted dict
    """
    def normalize(s):
        # Strings are stripped and, where possible, coerced to unicode;
        # non-string values pass through untouched.  (Python 2:
        # ``basestring`` covers both str and unicode.)
        if isinstance(s, basestring):
            try:
                return to_unicode(s.strip())
            except (UnicodeDecodeError, UnicodeEncodeError):
                # Undecodable bytes: keep the stripped raw value.
                return s.strip()
        else:
            return s

    # if we have headers = ['a','b'] and values [1,2,3,4], dict will be
    # {'a':1,'b':2}
    # if we have headers = ['a','b','c','d'] and values [1,2], dict will be
    # {'a':1,'b':2}
    # Pairs with an empty/falsy header name are dropped.
    d = SortedDict([i for i in zip(self.headers, map(normalize, row)) if i[0]])
    # since zip can cut tuple to smaller sequence, if we get incomplete
    # lines in file this for over headers put it on row dict
    for k in self.headers:
        if k not in d:
            d[k] = u''
    # Force the dict's iteration order to match the header order
    # (SortedDict-specific attribute).
    d.keyOrder = self.headers
    return d
def get_ms_id_from_image_names(manuscripts, folios):
    '''
    Returns (ret, suggested_shelfmark)
    ret = the id of the itempart which matches the most the given images
    suggested_shelfmark = a suggested shelfmark
    The matching is based on similarity of the name (shelfmark) and locus
    '''
    ret = None
    # a term is part of the search pattern if its frequency is above the threshold
    threshold = 0.75
    # find a pattern among the image paths
    # pattern maps term -> occurrence count across the inspected path parts.
    pattern = SortedDict()
    folio_count = len(folios)
    for folio in folios:
        im_path = folio.iipimage.name
        # remove the extension
        im_path = re.sub(ur'\.[^.]{2,4}$', '', im_path)
        # only keep the image file name and the parent folder
        parts = im_path.split('/')
        for i in range(max(0, len(parts) - 2), len(parts)):
            part = parts[i]
            # Count each run of alphanumerics (underscore excluded) as a term.
            for term in re.findall(ur'[^\W_]+', part):
                pattern[term] = pattern.get(term, 0) + 1
    # NOTE(review): the visible portion ends here without returning the
    # (ret, suggested_shelfmark) tuple promised by the docstring; the
    # remainder of this function is presumably outside this chunk.
def get_language_config(content_language=None):
    """Build the TinyMCE language configuration for the active locale.

    Returns a dict holding the UI language, the spellchecker language
    list (default marked with '+'), text directionality, and optionally
    the spellchecker RPC URL.
    """
    language = get_language()[:2]
    # Fall back to the UI language when no explicit content language given.
    content_language = content_language[:2] if content_language else language

    config = {'language': language}

    # Group human-readable names of all configured languages by their
    # two-letter code, keeping settings.LANGUAGES order.
    lang_names = SortedDict()
    for code, name in settings.LANGUAGES:
        short = code[:2]
        if short not in lang_names:
            lang_names[short] = []
        lang_names[short].append(_(name))

    sp_langs = []
    for code, names in lang_names.items():
        # '+' marks the spellchecker's default language.
        prefix = '+' if code == content_language else ''
        sp_langs.append(u'%s%s=%s' % (prefix, ' / '.join(names), code))
    config['spellchecker_languages'] = ','.join(sp_langs)

    if content_language in settings.LANGUAGES_BIDI:
        config['directionality'] = 'rtl'
    else:
        config['directionality'] = 'ltr'

    if tinymce_settings.USE_SPELLCHECKER:
        config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')

    return config
def fields_for_model(instance, fields=None, exclude=None, formfield_callback=None):
    """
    Returns a "SortedDict" containing form fields for the given fom_object.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.
    """
    field_list = []
    for f in instance.ordered_fields:
        if fields and f not in fields:
            continue
        # BUGFIX: the original test was inverted (``not f in exclude``),
        # which skipped every field EXCEPT the excluded ones -- the
        # opposite of the documented contract.  Skip excluded fields.
        if exclude and f in exclude:
            continue
        formfield = formfield_for_model_field(instance, f)
        if formfield:
            field_list.append((f, formfield))
    field_dict = SortedDict(field_list)
    if fields:
        # Re-order to match ``fields``, still honouring ``exclude``.
        field_dict = SortedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude))]
        )
    return field_dict
def list_targets(self):
    """Return the FloatingIpTarget candidates for the current tenant.

    A target is one (port, fixed IP) pair on a reachable subnet, labelled
    with the owning server's name (or the load-balancer VIP name).
    """
    tenant_id = self.request.user.tenant_id
    ports = port_list(self.request, tenant_id=tenant_id)
    servers, has_more = nova.server_list(self.request)
    # Index servers by id for O(1) name lookups below.
    server_dict = SortedDict([(s.id, s.name) for s in servers])
    reachable_subnets = self._get_reachable_subnets(ports)
    if is_service_enabled(self.request,
                          config_name='enable_lb',
                          ext_name='lbaas'):
        # Also get the loadbalancer VIPs
        vip_dict = {v['port_id']: v['name']
                    for v in self.client.list_vips().get('vips', [])}
    else:
        vip_dict = {}
    targets = []
    for p in ports:
        # Remove network ports from Floating IP targets
        if p.device_owner.startswith('network:'):
            continue
        port_id = p.id
        # Prefer the owning server's name; fall back to a VIP name for
        # load-balancer ports.
        server_name = server_dict.get(p.device_id) or vip_dict.get(port_id)
        for ip in p.fixed_ips:
            # Only IPs on subnets reachable from the external network
            # can be floating-IP targets.
            if ip['subnet_id'] not in reachable_subnets:
                continue
            target = {'name': '%s: %s' % (server_name, ip['ip_address']),
                      'id': '%s_%s' % (port_id, ip['ip_address']),
                      'instance_id': p.device_id}
            targets.append(FloatingIpTarget(target))
    return targets
def get_data(self):
    """Return all volumes for the request, annotating each attachment
    with its attached instance object (or None when lookup fails).

    API failures degrade gracefully to an empty list plus a user-visible
    error via ``exceptions.handle``.
    """
    # Gather our volumes
    try:
        volumes = api.volume_list(self.request)
    except Exception:
        # BUGFIX: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        volumes = []
        exceptions.handle(self.request,
                          _('Unable to retrieve volume list.'))
    try:
        instance_list = api.server_list(self.request)
    except Exception:
        # BUGFIX: same bare-except narrowing as above.
        instance_list = []
        exceptions.handle(self.request,
                          _("Unable to retrieve volume/instance "
                            "attachment information"))
    # Index instances by id for O(1) lookups while annotating attachments.
    instances = SortedDict([(inst.id, inst) for inst in instance_list])
    for volume in volumes:
        # It is possible to create a volume with no name through the
        # EC2 API, use the ID in those cases.
        if not volume.display_name:
            volume.display_name = volume.id
        for att in volume.attachments:
            server_id = att.get('server_id', None)
            att['instance'] = instances.get(server_id, None)
    return volumes
def log_unidadesaude_by_form(request, healthUnit, stDate=365, endDate=0):
    """Return, as JSON, a form-name x month table of record (ficha) counts
    for one health unit over the window [now - stDate, now - endDate] days.

    Cells default to 0; only months/forms with records are overwritten.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect(settings.SITE_ROOT + 'admin/')
    now = datetime.now()
    try:
        us = UnidadeSaude.objects.get(pk=int(healthUnit))
    except:
        # NOTE(review): bare except also hides ValueError from int() and
        # any database error, mapping them all to this 404.
        return HttpResponseNotFound('Id de unidade errado')
    # Group at the SQL level by calendar month of insertion.
    truncate_date = connection.ops.date_trunc_sql('month', 'forms_ficha.data_insercao')
    fichas_report = Ficha.objects.\
        filter(
            unidadesaude=us,
            data_insercao__gte=now - timedelta(days=stDate),
            data_insercao__lte=now - timedelta(days=endDate)
        ).\
        extra(select={'month': truncate_date}).\
        values('formulario__nome', 'month').\
        annotate(numero_fichas=Count('pk')).\
        order_by('-month')
    # Re-key each row: the SQL month timestamp becomes a 'YYYYMM' label
    # matching the column labels built below.
    fichas_report = [
        dict([
            ('formulario__nome', l['formulario__nome']),
            ('month', datetime.strptime(l['month'].split(' ')[0], '%Y-%m-%d').strftime('%Y%m')),
            ('numero_fichas', l['numero_fichas'])
        ]) for l in fichas_report]
    columns = [dt.strftime('%Y%m') for dt in getMonthList(now, stDate, endDate)]
    forms = Formulario.objects.all()
    rows = [r.nome for r in forms]
    # Build the zero-filled table first (every form x every month)...
    table_data = SortedDict().fromkeys(rows)
    for k in table_data.keys():
        table_data[k] = SortedDict().fromkeys(columns, 0)
    # ...then fill in the observed counts.
    for f in fichas_report:
        table_data[f['formulario__nome']][f['month']] = f['numero_fichas']
    return json_response(table_data)
def get(self, request, namespace=None, report_slug=None, widget_id=None):
    """Return, as JSON, the criteria dict (table-field keyword -> initial
    value) for either a single widget or a whole report."""
    try:
        all_fields = SortedDict()
        if widget_id:
            # Single-widget request: only that widget's fields matter.
            w = Widget.objects.get(pk=widget_id)
            all_fields = w.collect_fields()
        else:
            # Whole-report request: merge the fields of every section.
            report = Report.objects.get(namespace=namespace,
                                        slug=report_slug)
            fields_by_section = report.collect_fields_by_section()
            for c in fields_by_section.values():
                all_fields.update(c)
    except:
        # NOTE(review): bare except maps *any* failure here (bad pk,
        # database error) to a 404, not just missing objects.
        raise Http404
    form = TableFieldForm(all_fields, use_widgets=False)
    # create object from the tablefield keywords
    # then populate it with the initial data that got generated by default
    keys = form._tablefields.keys()
    criteria = dict(zip(keys, [None] * len(keys)))
    criteria.update(form.data)
    return HttpResponse(json.dumps(criteria))
def post(self, request, namespace=None, report_slug=None):
    """Handle a REST POST for a report: rebuild its dynamic criteria
    fields from the submitted data and return them as JSON."""
    # handle REST calls
    if report_slug is None:
        return self.http_method_not_allowed(request)
    logger.debug("Received POST for report %s, with params: %s" %
                 (report_slug, request.POST))
    try:
        report = Report.objects.get(slug=report_slug)
    except Report.DoesNotExist:
        # BUGFIX: narrowed from a bare ``except:`` that also converted
        # unrelated failures (database outages, typos) into 404s.
        raise Http404
    fields_by_section = report.collect_fields_by_section()
    all_fields = SortedDict()
    # FIX: plain loop instead of a list comprehension executed purely
    # for its side effects.
    for section_fields in fields_by_section.values():
        all_fields.update(section_fields)
    form = TableFieldForm(all_fields,
                          hidden_fields=report.hidden_fields,
                          data=request.POST,
                          files=request.FILES)
    # One {'id', 'html'} entry per dynamic field, for client-side render.
    response = []
    for field in form.dynamic_fields():
        response.append({'id': field.auto_id, 'html': str(field)})
    return HttpResponse(json.dumps(response))
def get_actions(self):
    """Assemble the ordered mapping of review actions available for this
    review type, attaching the per-action detail text at the end."""
    labels, details = self._review_actions()
    actions = SortedDict()

    # 'public' is only offered for full (non-preliminary) reviews.
    if self.review_type != 'preliminary':
        actions['public'] = {'method': self.handler.process_public,
                             'minimal': False,
                             'label': _lazy('Push to public')}

    actions['prelim'] = {'method': self.handler.process_preliminary,
                         'minimal': False,
                         'label': labels['prelim']}
    actions['reject'] = {'method': self.handler.process_sandbox,
                         'minimal': False,
                         'label': _lazy('Reject')}
    actions['info'] = {'method': self.handler.request_information,
                       'minimal': True,
                       'label': _lazy('Request more information')}
    actions['super'] = {'method': self.handler.process_super_review,
                        'minimal': True,
                        'label': _lazy('Request super-review')}
    actions['comment'] = {'method': self.handler.process_comment,
                          'minimal': True,
                          'label': _lazy('Comment')}

    # Attach the detail blurb (may be None) for each action key.
    for key, action in actions.items():
        action['details'] = details.get(key)

    return actions
def create_document(name, fields, attrs=None, module="dockit.models",
                    base=DocumentBase, parents=(Document,), **kwargs):
    """Dynamically build a document class named ``name``.

    ``fields`` seeds the attribute dict, ``attrs`` (optional) overrides
    or extends it, and any extra keyword arguments become Meta options.

    BUGFIX: ``attrs`` previously defaulted to a mutable ``{}``; a None
    sentinel guarantees the default can never be shared or mutated
    across calls.  Passing ``attrs={}`` explicitly behaves as before.
    """
    all_attrs = SortedDict(fields)
    if attrs:
        all_attrs.update(attrs)
    all_attrs["__module__"] = module
    if kwargs:
        # Extra keyword options become the Meta container.
        all_attrs["Meta"] = UserMeta(**kwargs)
    # Delegate the actual class construction to the metaclass.
    return base.__new__(base, name, parents, all_attrs)
def _site_query(period, start, end, field=None, request=None):
    """Run the raw global_stats aggregate at one period granularity.

    Returns (rows, _CACHED_KEYS) where rows is a list of per-date dicts:
    {'date': 'YYYY-MM-DD', 'data': {metric: count, ...}}, newest first.
    """
    # NOTE(review): old_version is computed but unused in this visible
    # portion -- confirm whether later code (outside this chunk) needs it.
    old_version = request and request.GET.get("old_version", "0") or "0"

    cursor = connection.cursor()
    # Let MySQL make this fast. Make sure we prevent SQL injection with the
    # assert.
    if period not in SERIES_GROUPS_DATE:
        raise AssertionError("%s period is not valid." % period)
    # ``period`` is interpolated directly into the SQL (hence the whitelist
    # check above); all data values go through placeholders.
    sql = (
        "SELECT name, MIN(date), SUM(count) "
        "FROM global_stats "
        "WHERE date > %%s AND date <= %%s "
        "AND name IN (%s) "
        "GROUP BY %s(date), name "
        "ORDER BY %s(date) DESC;"
        % (", ".join(["%s" for key in _KEYS.keys()]), period, period)
    )
    # NOTE: list + list concat requires Python 2 (keys() returns a list).
    cursor.execute(sql, [start, end] + _KEYS.keys())

    # Process the results into a format that is friendly for render_*.
    default = dict([(k, 0) for k in _CACHED_KEYS])
    result = SortedDict()
    for name, date, count in cursor.fetchall():
        date = date.strftime("%Y-%m-%d")
        if date not in result:
            # First metric seen for this date: start from the zeroed row.
            result[date] = default.copy()
            result[date]["date"] = date
            result[date]["data"] = {}
        result[date]["data"][_KEYS[name]] = count
    return result.values(), _CACHED_KEYS
def fields_for_document(document, fields=None, exclude=None,
                        formfield_callback=None):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the
    named fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are
    listed in the ``fields`` argument.
    """
    pairs = []
    for field_name, field_type in document.structure.items():
        # Honour the include/exclude filters first.
        if fields and field_name not in fields:
            continue
        if exclude and field_name in exclude:
            continue
        # The callback gets first crack at building the form field;
        # fall back to the generic per-field factory if it declines.
        form_field = (formfield_callback(document, field_name)
                      if formfield_callback else None)
        if not form_field:
            form_field = formfield_for_document_field(document, field_name)
        if form_field:
            pairs.append((field_name, form_field))
    field_dict = SortedDict(pairs)
    if fields:
        # Re-order to follow ``fields``, still dropping excluded names.
        field_dict = SortedDict(
            [(name, field_dict.get(name)) for name in fields
             if (not exclude) or (exclude and name not in exclude)])
    return field_dict
def cleanup_email_addresses(request, addresses):
    """
    Takes a list of EmailAddress instances and cleans it up, making sure
    only valid ones remain, without multiple primaries etc.

    Order is important: e.g. if multiple primary e-mail addresses exist,
    the first one encountered will be kept as primary.
    """
    from .models import EmailAddress
    adapter = get_adapter()
    # Let's group by `email`
    e2a = SortedDict()  # maps email to EmailAddress
    primary_addresses = []
    verified_addresses = []
    primary_verified_addresses = []
    for address in addresses:
        # Pick up only valid ones...
        email = valid_email_or_none(address.email)
        if not email:
            continue
        # ... and non-conflicting ones...
        if (app_settings.UNIQUE_EMAIL and
                EmailAddress.objects
                .filter(email__iexact=email)
                .exists()):
            continue
        # Merge duplicates (case-insensitive): OR the primary/verified
        # flags of every occurrence of the address together.
        a = e2a.get(email.lower())
        if a:
            a.primary = a.primary or address.primary
            a.verified = a.verified or address.verified
        else:
            a = address
            a.verified = a.verified or adapter.is_email_verified(request,
                                                                 a.email)
            e2a[email.lower()] = a
        if a.primary:
            primary_addresses.append(a)
            if a.verified:
                primary_verified_addresses.append(a)
        if a.verified:
            verified_addresses.append(a)
    # Now that we got things sorted out, let's assign a primary:
    # verified-and-primary wins, then any verified, then any primary.
    if primary_verified_addresses:
        primary_address = primary_verified_addresses[0]
    elif verified_addresses:
        # Pick any verified as primary
        primary_address = verified_addresses[0]
    elif primary_addresses:
        # Okay, let's pick primary then, even if unverified
        primary_address = primary_addresses[0]
    elif e2a:
        # Pick the first
        # NOTE(review): Python 2 idiom -- keys()[0] fails on Python 3, and
        # it yields a *string*, so the .email access in the loop below
        # would raise if this branch were ever taken.  Verify upstream.
        primary_address = e2a.keys()[0]
    else:
        # Empty
        primary_address = None
    # There can only be one primary (loop body never runs when e2a is
    # empty, so primary_address=None is safe here)
    for a in e2a.values():
        a.primary = primary_address.email.lower() == a.email.lower()
    return list(e2a.values()), primary_address
class AppDirectoriesFinder(BaseFinder):
    """
    A static files finder that looks in the directory of each app as
    specified in the source_dir attribute of the given storage class.
    """
    storage_class = AppStaticStorage

    def __init__(self, apps=None, *args, **kwargs):
        # The list of apps that are handled
        self.apps = []
        # Mapping of app module paths to storage instances
        self.storages = SortedDict()
        if apps is None:
            apps = settings.INSTALLED_APPS
        for app in apps:
            app_storage = self.storage_class(app)
            # Only register apps that actually ship a static directory.
            if os.path.isdir(app_storage.location):
                self.storages[app] = app_storage
                if app not in self.apps:
                    self.apps.append(app)
        super(AppDirectoriesFinder, self).__init__(*args, **kwargs)

    def list(self, ignore_patterns):
        """
        List all files in all app storages.
        """
        # NOTE: itervalues() is Python 2 only.
        for storage in self.storages.itervalues():
            if storage.exists(''):  # check if storage location exists
                for path in utils.get_files(storage, ignore_patterns):
                    yield path, storage

    def find(self, path, all=False):
        """
        Looks for files in the app directories.

        Returns the first match unless ``all`` is true, in which case a
        list of every match (possibly empty) is returned.
        """
        matches = []
        for app in self.apps:
            match = self.find_in_app(app, path)
            if match:
                if not all:
                    return match
                matches.append(match)
        return matches

    def find_in_app(self, app, path):
        """
        Find a requested static file in an app's static locations.

        Returns the filesystem path on success, otherwise None
        (implicitly, when the storage or file is missing).
        """
        storage = self.storages.get(app, None)
        if storage:
            if storage.prefix:
                prefix = '%s%s' % (storage.prefix, os.sep)
                # Paths outside this storage's prefix can never match.
                if not path.startswith(prefix):
                    return None
                path = path[len(prefix):]
            # only try to find a file if the source dir actually exists
            if storage.exists(path):
                matched_path = storage.path(path)
                if matched_path:
                    return matched_path
def get_containers(template):
    """Collect the contentlet and contentreference specs used anywhere in
    ``template``'s inheritance chain, including inside {% block %}s.

    Returns (contentlet_specs, contentreference_specs).
    """
    # Build a tree of the templates we're using, placing the root template first.
    levels = build_extension_tree(template.nodelist)

    contentlet_specs = []
    contentreference_specs = SortedDict()
    blocks = {}

    for level in reversed(levels):
        level.initialize()
        # ifilter is lazy and extend() consumes it item-by-item, so the
        # "not in contentlet_specs" predicate sees items appended earlier
        # in this same call -- deduping within a level as well as across.
        contentlet_specs.extend(itertools.ifilter(lambda x: x not in contentlet_specs, level.contentlet_specs))
        contentreference_specs.update(level.contentreference_specs)
        for name, block in level.blocks.items():
            if block.block_super:
                # {{ block.super }} chain: keep every override, in order.
                blocks.setdefault(name, []).append(block)
            else:
                # Plain override: discard any earlier versions of the block.
                blocks[name] = [block]

    # Second pass: pull specs out of the surviving block overrides.
    for block_list in blocks.values():
        for block in block_list:
            block.initialize()
            contentlet_specs.extend(itertools.ifilter(lambda x: x not in contentlet_specs, block.contentlet_specs))
            contentreference_specs.update(block.contentreference_specs)

    return contentlet_specs, contentreference_specs
def get_favored_results(self, error=5, threshhold=None):
    """
    Calculates the set of most-favored results based on their weight.
    Evenly-weighted results will be grouped together and either added or
    excluded as a group.

    :param error: An arbitrary number; higher values will cause this
        method to be more reticent about adding new items to the
        favored results.
    :param threshhold: Will be passed directly into
        :meth:`get_weighted_results`

    The computed list is cached on the instance after the first call.
    """
    if not hasattr(self, '_favored_results'):
        weighted = self.get_weighted_results(threshhold)

        # Bucket the results by exact weight, keeping encounter order.
        buckets = SortedDict()
        for item in weighted:
            buckets.setdefault(item.weight, []).append(item)

        favored = []
        for weight, bucket in buckets.items():
            # Accept a bucket only while its weight beats the (growing)
            # penalty for straying from already-accepted weights.
            penalty = error * sum((weight - item.weight) ** 2
                                  for item in favored)
            if weight > penalty:
                favored += bucket
            else:
                break

        # If *everything* qualified, nothing is meaningfully "favored".
        if len(favored) == len(weighted):
            favored = []
        self._favored_results = favored
    return self._favored_results
def dump_data(request, appname):
    """Serialize every model of the selected app(s) to a JSON fixture and
    return it as a file download; renders a chooser page otherwise.

    ``appname`` may be an app label or 'all' (dump every installed app);
    a POST with an 'apps' list dumps several apps at once.
    """
    app_list = SortedDict()
    try:
        if request.POST:
            # Multi-app dump selected from the chooser form.
            for appname in request.POST.getlist('apps'):
                app = get_app(appname)
                app_list[app] = None
            appname = 'choices'
        else:
            app = get_app(appname)
            app_list[app] = None
    except ImproperlyConfigured:
        # get_app raised for an unknown label; 'all' means every app.
        if appname == 'all':
            for app in get_apps():
                app_list[app] = None
    if(len(app_list) > 0):
        objects = []
        # sort_dependencies orders models so natural-key references
        # serialize before their dependents.
        for model in sort_dependencies(app_list.items()):
            if not model._meta.proxy and router.allow_syncdb(DEFAULT_DB_ALIAS, model):
                objects.extend(model._default_manager.using(DEFAULT_DB_ALIAS).all())
        # NOTE(review): the return value of get_serializer() is discarded;
        # this call looks redundant since serializers.serialize('json', ...)
        # is used directly below.
        serializers.get_serializer('json')
        json = serializers.serialize('json', objects, indent=2, use_natural_keys=True)
        response = HttpResponse(json, mimetype='application/json');
        response['Content-Disposition'] = 'attachment; filename=%s_%s_fixture.json' % (date.today().__str__(), appname)
        return response
    # Nothing selected / unknown app: show the chooser page.
    return render_to_response('diagnostic/dumpdata.html', context_instance=RequestContext(request))
def get_nav_menu(self):
    """Build the sidebar navigation menu: one entry per app, each with an
    alphabetized list of model changelist links.

    A site-level menu (get_site_menu) takes precedence when defined.
    """
    nav_menu = self.get_site_menu()
    if nav_menu:
        return nav_menu

    nav_menu = SortedDict()
    for model, model_admin in self.admin_site._registry.items():
        app_label = model._meta.app_label
        model_dict = {
            'title': unicode(capfirst(model._meta.verbose_name_plural)),
            'url': self.get_model_url(model, "changelist"),
            'perm': self.get_model_perm(model, 'change')
        }
        # Group models under a per-app key.
        app_key = "app:%s" % app_label
        if app_key in nav_menu:
            nav_menu[app_key]['menus'].append(model_dict)
        else:
            nav_menu[app_key] = {
                'title': unicode(app_label.title()),
                'menus': [model_dict],
            }

    # Alphabetize models within each app, then the apps themselves.
    for menu in nav_menu.values():
        menu['menus'].sort(key=lambda x: x['title'])

    # NOTE: Python 2 -- values() returns a list here; a Python 3 view
    # object has no .sort().
    nav_menu = nav_menu.values()
    nav_menu.sort(key=lambda x: x['title'])
    return nav_menu
def history(request):
    """
    Tally total expenses and income for each month
    """
    history = SortedDict()
    entries = Entry.objects.all().order_by('-date')
    for e in entries:
        # Create dict key: first day of the entry's month.
        this_month = datetime.date(e.date.year, e.date.month, 1)
        # FIX: dict.has_key() is deprecated (and removed in Python 3);
        # the ``in`` operator is the supported equivalent.
        if this_month not in history:
            history[this_month] = {'income': 0, 'expense': 0}
        # sum values for month
        if e.category.type in ['EXP', 'COGS']:
            history[this_month]['expense'] += e.amount
        elif e.category.type == 'INC':
            history[this_month]['income'] += e.amount
    # Derive the net figure for each month once the totals are complete.
    for date, value_dict in history.items():
        value_dict['net'] = value_dict['income'] - value_dict['expense']
    return simple.direct_to_template(
        request,
        template='beancounter/history.html',
        extra_context={'history': history})
def dashboard(request):
    """Render the project-disbursement dashboard: a project x month matrix
    of disbursed dollar totals over the last six months with data.

    Restricted to members of the SPDT-Servidores / SSPLF groups.
    """
    if request.user.groups.filter(name__in=['SPDT-Servidores', 'SSPLF']).count() <= 0:
        raise PermissionDenied
    desembolsos_max = 0
    matriz = SortedDict()
    dados = SortedDict()
    projetos = Projeto.objects.all()
    # The six most recent months that have any disbursement records.
    meses = Desembolso.objects.dates('data', 'month', 'DESC')[:6]
    colors = ['ffff00', 'cc7900', 'ff0000', '92d050', '006600', '0097cc', '002776',
              'ae78d6', 'ff00ff', '430080', '28d75c', '0000ff', 'fff200']
    # Zero-filled month template, oldest month first.
    for date in reversed(meses):
        mes_ano = '%s/%s' % (date.month, date.year)
        dados[mes_ano] = 0
    # One (sigla, month->total) row per project; dados.copy() keeps the
    # rows independent of each other.
    for p in projetos:
        matriz[p.id] = (p.sigla, dados.copy())
    for date in meses:
        mes_ano = '%s/%s' % (date.month, date.year)
        # Per-project dollar totals for this month.
        for d in Desembolso.objects.filter(data__year=date.year, data__month=date.month).values('projeto').annotate(total_dolar=Sum('valor_dolar')):
            # Track the largest single cell (used for chart scaling).
            if int(d['total_dolar']) > desembolsos_max:
                desembolsos_max = int(d['total_dolar'])
            matriz[d['projeto']][1][mes_ano] += int(d['total_dolar'])
    meses = ["%s/%s" % (m.month, m.year) for m in reversed(meses)]
    extra_context = {'desembolsos': matriz, 'desembolsos_max': desembolsos_max, 'meses': meses,
                     'colors': ','.join(colors[:len(matriz)])}
    return render_to_response('metas/dashboard.html', extra_context, context_instance=RequestContext(request))
def _build_report_data(self):
    """Aggregate visits by date into report rows, appending a totals row.

    Each row carries visit count, pageviews, total duration, bounces
    and new/returning splits for one visit_date.
    """
    data = []
    tmp_data = SortedDict()
    totals = defaultdict(int)
    visits = self.visit_queryset().order_by('visit_date')
    for visit in visits:
        # ideally this would be done in the database
        # but grouping really isn't a thing Django does well
        date = visit.visit_date
        if date not in tmp_data:
            tmp_data[date] = defaultdict(int)
        tmp_data[date]['visits'] += 1
        page_visits = visit.pagevisit_set.count()
        tmp_data[date]['pageviews'] += page_visits
        tmp_data[date]['duration'] += visit.duration or 0
        if page_visits == 1:
            # A single-page visit counts as a bounce.
            tmp_data[date]['bounces'] += 1
        if visit.visitor.visit_set.count() > 1:
            tmp_data[date]['returning_visits'] += 1
        else:
            tmp_data[date]['new_visits'] += 1
    # Second pass: accumulate grand totals and emit formatted rows.
    # NOTE: iteritems() is Python 2 only.
    for visit_date, visit_data in tmp_data.iteritems():
        for key, value in visit_data.items():
            totals[key] += value
        date_label = utils.format_date(visit_date)
        data.append(self._build_row(visit_data, date_label))
    if data:
        data.append(self._build_row(totals, "Totals"))
    return data
class RightManager(object):
    """Registry for rights, their categories and their levels.

    All three registries preserve registration order; re-registering a
    key silently replaces the previous entry.
    """

    def __init__(self):
        self.right_cls = Right
        self._levels = SortedDict()
        self._categories = SortedDict()
        self._rights = SortedDict()

    def register_level(self, key, title, description=""):
        """Register (or replace) a right level under ``key``."""
        self._levels[key] = RightLevel(self, key, title, description)

    def register_category(self, key, title, description=""):
        """Register (or replace) a right category under ``key``."""
        self._categories[key] = RightCategory(self, key, title, description)

    def register(self, key, title, description="", category="", level=""):
        """Register (or replace) a right under ``key``."""
        self._rights[key] = Right(self, key, title,
                                  description=description,
                                  category=category,
                                  level=level)

    def get_category(self, category_key):
        """Return the category for ``category_key``, or None."""
        return self._categories.get(category_key, None)

    def get_level(self, level_key):
        """Return the level for ``level_key``, or None."""
        return self._levels.get(level_key, None)

    def get_right(self, key):
        """Return the right for ``key``, or None."""
        return self._rights.get(key)

    def get_rights(self):
        """All registered rights, in registration order."""
        return [self.get_right(k) for k in self._rights.keys()]

    def get_rights_dict(self):
        """All registered rights as dicts, in registration order."""
        return [self.get_right(k).to_dict() for k in self._rights.keys()]
def viewProfile(request):
    """Render the customer-profile page.

    Access rules enforced below:
      * anonymous users are redirected home;
      * superusers must pass an ?id=... parameter;
      * regular users may only view their own profile.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/')
    if not request.get_full_path().__contains__('id') and request.user.is_superuser:
        return HttpResponseRedirect('/')
    if request.get_full_path().__contains__('id') and not request.user.is_superuser:
        # NOTE(review): the customer id is parsed straight out of the URL
        # tail; a non-numeric value raises ValueError here.
        inputCusId = int(request.get_full_path().split('id=')[-1])
        if inputCusId != dbaccess.getCustIdByUserId(request.user.id):
            # Regular users cannot look at other customers' profiles.
            return HttpResponseRedirect('/')
        else:
            custRow = dbaccess.getCustInfoById(int(request.get_full_path().split('id=')[-1]))
    else:
        if request.get_full_path().__contains__('id'):
            custRow = dbaccess.getCustInfoById(int(request.get_full_path().split('id=')[-1]))
        else:
            cusId = dbaccess.getCustIdByUserId(request.user.id)
            custRow = dbaccess.getCustInfoById(cusId)
    # Ordered label -> value mappings for the template; the indices
    # presumably follow the customer-row column layout in dbaccess --
    # TODO confirm against getCustInfoById.
    custInfo = SortedDict([
        ('Email', custRow[3]),
        ('First Name', custRow[1]),
        ('Last Name', custRow[2]),
        ('Street Name', custRow[6]),
        ('Postal Code', custRow[5]),
        ('Contact Number', custRow[4]),
    ])
    custCredit = SortedDict([
        ('Serial Number', custRow[8]),
        ('Expiry Date', custRow[7])
    ])
    # NOTE: iteritems() is Python 2 only.
    return render_to_response('userprofile/viewProfile.html', {
        'custInfo': custInfo.iteritems(),
        'custCredit': custCredit.iteritems(),
    }, context_instance=RequestContext(request))
def fields_for_model(model, fields=None, exclude=None, widgets=None,
                     formfield_callback=lambda f, **kwargs: f.formfield(**kwargs)):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the
    named fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are
    listed in the ``fields`` argument.
    """
    opts = model._meta
    pairs = []
    for model_field in opts.fields + opts.many_to_many:
        name = model_field.name
        # Skip non-editable fields and anything rejected by the
        # include/exclude filters.
        if not model_field.editable:
            continue
        if fields and name not in fields:
            continue
        if exclude and name in exclude:
            continue
        # Per-field widget overrides are forwarded to the form field.
        kwargs = ({'widget': widgets[name]}
                  if widgets and name in widgets else {})
        formfield = formfield_callback(model_field, **kwargs)
        if formfield:
            pairs.append((name, formfield))
    field_dict = SortedDict(pairs)
    if fields:
        # Re-order to match ``fields``, still honouring ``exclude``.
        field_dict = SortedDict(
            [(name, field_dict.get(name)) for name in fields
             if (not exclude) or (exclude and name not in exclude)])
    return field_dict
def build_pretty_data_view(form_instance, model_object, exclude=(), append=()):
    '''
    Taken from: http://stackoverflow.com/questions/2170228/django-iterate-over-model-instance-field-names-and-values-in-template#comment4280740_3431104
    @author: Alan Viars
    '''
    position = 0
    pretty = SortedDict()

    # Extra attributes requested via ``append`` go first; names the model
    # object lacks are silently skipped.
    for name in append:
        try:
            entry = {'label': name.capitalize(),
                     'fieldvalue': getattr(model_object, name)}
            pretty.insert(position, name, entry)
            position += 1
        except(AttributeError):
            pass

    # Then one row per form field (unless excluded), preferring the
    # field's verbose label when it has one.
    for name, field in form_instance.fields.items():
        if name in exclude:
            continue
        label = field.label if field.label is not None else name
        pretty.insert(position, name,
                      {'label': label,
                       'fieldvalue': getattr(model_object, name)})
        position += 1

    return pretty
def get_message_list(project_id, lang_id, src_filters={}, target_filters={}):
    """Return translation rows for a project: one dict per msgid holding
    the source-language msgstr and (when present) the target-language
    msgstr plus its translation status.

    Only msgids that have a target-language row are returned.
    NOTE(review): mutable default arguments ({}); harmless while they are
    only read, but fragile if ever mutated.
    """
    lang_id = int(lang_id)
    project_language = Project.objects.get(id=project_id).lang.id
    new_query = SetMessage.objects.all()
    if not new_query.exists():
        return []
    new_query = new_query.filter(_normalize_filters(src_filters))
    target_predicate = _normalize_filters(target_filters)
    if target_predicate:
        # Keep source-language rows plus any rows matching the target
        # filter predicate.
        new_query = new_query.filter(Q(lang=project_language) | Q(target_predicate))
    res = SortedDict()
    new_query = _update_message_query(new_query, project_id, lang_id)
    # Fold the per-language rows into one record per msgid.
    for data in new_query.order_by('msgid'):
        msg_info = res.setdefault(data.msgid, {'msg_id': data.msgid})
        if data.lang_id == lang_id:
            # Target-language row: translation text and status.
            msg_info.update(
                {
                    'msg_target': data.msgstr,
                    'target_id': data.id,
                    'is_translated': data.is_translated
                }
            )
        if data.lang_id == project_language:
            # Source-language row.
            msg_info.update({'msg_source': data.msgstr, 'id': data.id})
    # Drop msgids that never received a target-language row.
    messages = [i for i in res.values() if 'msg_target' in i]
    return messages
def make_fields(self, **kwargs):
    """Build one form field per language (or just the default language
    when this field is not localized), seeding each with the stored
    per-language value and labelling it with its language name."""
    if self.localized:
        langs_dict = SortedDict(django_settings.LANGUAGES)
        default_code = django_settings.LANGUAGE_CODE
        default_name = langs_dict[default_code]
        # Move the project default language to the front of the ordering
        # (SortedDict.insert relocates an existing key).
        langs_dict.insert(0, default_code, default_name)
        langs = langs_dict.keys()
    else:
        langs = (django_settings.LANGUAGE_CODE,)
    fields = list()
    for lang in langs:
        kwargs['language_code'] = lang
        fields.append(self.make_field(**kwargs))
    # set initial values
    for field in fields:
        lang = field.language_code
        field.initial = self.get_editor_value(lang)
    if self.localized and len(django_settings.LANGUAGES) > 1:
        # Append "(Language name)" to each label so the parallel fields
        # are distinguishable on the form.
        for field in fields:
            lang_name = unicode(langs_dict[field.language_code])
            field.label += mark_safe(' <span class="lang">(%s)</span>' % lang_name)
    return fields
def compress(self, log=None, **options):
    """
    Searches templates containing 'compress' nodes and compresses them
    "offline" -- outside of the request/response cycle.

    The result is cached with a cache-key derived from the content of the
    compress nodes (not the content of the possibly linked files!).
    """
    extensions = options.get('extensions')
    extensions = self.handle_extensions(extensions or ['html'])
    verbosity = int(options.get("verbosity", 0))
    if not log:
        log = StringIO()
    if not settings.TEMPLATE_LOADERS:
        raise OfflineGenerationError("No template loaders defined. You "
                                     "must set TEMPLATE_LOADERS in your "
                                     "settings.")
    # Collect every directory the configured template loaders search.
    paths = set()
    for loader in self.get_loaders():
        try:
            module = import_module(loader.__module__)
            get_template_sources = getattr(module,
                                           'get_template_sources', None)
            if get_template_sources is None:
                get_template_sources = loader.get_template_sources
            paths.update(list(get_template_sources('')))
        except (ImportError, AttributeError, TypeError):
            # Yeah, this didn't work out so well, let's move on
            pass
    if not paths:
        raise OfflineGenerationError("No template paths found. None of "
                                     "the configured template loaders "
                                     "provided template paths. See "
                                     "http://django.me/template-loaders "
                                     "for more information on template "
                                     "loaders.")
    if verbosity > 1:
        log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
    # Walk the paths and gather every template matching the extensions.
    templates = set()
    for path in paths:
        for root, dirs, files in os.walk(path,
                followlinks=options.get('followlinks', False)):
            templates.update(
                os.path.join(root, name) for name in files
                if not name.startswith('.') and
                any(fnmatch(name, "*%s" % glob) for glob in extensions))
    if not templates:
        # NOTE(review): this message spanned a garbled character in the
        # extracted source; reconstructed as a single space.
        raise OfflineGenerationError("No templates found. Make sure your "
                                     "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                     "settings are correct.")
    if verbosity > 1:
        log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

    engine = options.get("engine", "django")
    parser = self.__get_parser(engine)

    # Parse each template and collect its 'compress' nodes; templates
    # that fail to parse are skipped (logged when verbosity > 0).
    compressor_nodes = SortedDict()
    for template_name in templates:
        try:
            template = parser.parse(template_name)
        except IOError:  # unreadable file -> ignore
            if verbosity > 0:
                log.write("Unreadable template at: %s\n" % template_name)
            continue
        except TemplateSyntaxError as e:  # broken template -> ignore
            if verbosity > 0:
                log.write("Invalid template %s: %s\n" % (template_name, e))
            continue
        except TemplateDoesNotExist:  # non existent template -> ignore
            if verbosity > 0:
                log.write("Non-existent template at: %s\n" % template_name)
            continue
        except UnicodeDecodeError:
            if verbosity > 0:
                log.write("UnicodeDecodeError while trying to read "
                          "template %s\n" % template_name)
            # NOTE(review): no ``continue`` here -- ``template`` may be
            # unbound (or stale from the previous loop iteration) when
            # walk_nodes runs below.  Verify against upstream.
        try:
            nodes = list(parser.walk_nodes(template))
        except (TemplateDoesNotExist, TemplateSyntaxError) as e:
            # Could be an error in some base template
            if verbosity > 0:
                log.write("Error parsing template %s: %s\n"
                          % (template_name, e))
            continue
        if nodes:
            template.template_name = template_name
            compressor_nodes.setdefault(template, []).extend(nodes)

    if not compressor_nodes:
        raise OfflineGenerationError(
            "No 'compress' template tags found in templates."
            "Try running compress command with --follow-links and/or"
            "--extension=EXTENSIONS")

    if verbosity > 0:
        log.write("Found 'compress' tags in:\n\t" +
                  "\n\t".join((t.template_name
                               for t in compressor_nodes.keys())) + "\n")

    log.write("Compressing... ")
    count = 0
    results = []
    offline_manifest = SortedDict()
    init_context = parser.get_init_context(
        settings.COMPRESS_OFFLINE_CONTEXT)

    # Render every compress node once and record the result in the
    # offline manifest, keyed by the hexdigest of the rendered content.
    for template, nodes in compressor_nodes.items():
        context = Context(init_context)
        template._log = log
        template._log_verbosity = verbosity

        if not parser.process_template(template, context):
            continue

        for node in nodes:
            context.push()
            parser.process_node(template, context, node)
            rendered = parser.render_nodelist(template, context, node)
            key = get_offline_hexdigest(rendered)

            if key in offline_manifest:
                # NOTE(review): this skip bypasses context.pop(), leaving
                # an extra layer on the context stack for later nodes.
                continue

            try:
                result = parser.render_node(template, context, node)
            except Exception as e:
                raise CommandError("An error occured during rendering %s: "
                                   "%s" % (template.template_name, e))
            offline_manifest[key] = result
            context.pop()
            results.append(result)
            count += 1

    write_offline_manifest(offline_manifest)

    log.write("done\nCompressed %d block(s) from %d template(s).\n" %
              (count, len(compressor_nodes)))
    return count, results
def test_copy(self):
    """copy.copy() must preserve both the items and the key order."""
    original = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
    duplicate = copy.copy(original)
    # Both the source and the shallow copy keep insertion order.
    self.assertEqual(list(six.iterkeys(original)), [1, 0, 2])
    self.assertEqual(list(six.iterkeys(duplicate)), [1, 0, 2])
def test_tuple_init(self):
    """Initialising from a tuple of pairs must preserve the pair order."""
    sd = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
    self.assertEqual(repr(sd), "{1: 'one', 0: 'zero', 2: 'two'}")
def test_dict_equality(self):
    """A SortedDict compares equal to a plain dict holding the same items."""
    ordered = SortedDict((n, n) for n in range(3))
    plain = {0: 0, 1: 1, 2: 2}
    self.assertEqual(ordered, plain)
import logging

from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils import six
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _

from reviewboard.accounts.forms.pages import (AccountSettingsForm,
                                              ChangePasswordForm,
                                              ProfileForm,
                                              GroupsForm)


# Whether the built-in page/form classes have been registered yet.
# NOTE(review): presumably flipped by a populate/registration routine
# elsewhere in this module — confirm.
_populated = False

# Registry of form classes; keys/values are filled in elsewhere in this
# module (not visible here).
_registered_form_classes = {}

# Registry of AccountPage subclasses; SortedDict preserves the order in
# which pages were registered, which presumably drives sidebar ordering.
_registered_page_classes = SortedDict()


class AccountPage(object):
    """Base class for a page of forms in the My Account page.

    Each AccountPage is represented in the My Account page by an entry
    in the navigation sidebar. When the user has navigated to that page,
    any forms shown on the page will be displayed.

    Extensions can provide custom pages in order to offer per-user
    customization.
    """
    # NOTE(review): these look like subclass-overridable declarations —
    # a unique page ID, a human-readable title, a list of form classes
    # to render, and the template used to render the page. Confirm
    # against the registration code elsewhere in this module.
    page_id = None
    page_title = None
    form_classes = None

    template_name = 'accounts/prefs_page.html'
def __init__(self, *args, **kwargs):
    """
    This form is used only to create a partner.

    The form's fields are rebuilt as a SortedDict so that the
    partner-specific fields come first, followed by the model-derived
    contact fields (with 'notes' forced to the end).

    :param user: popped from ``kwargs`` and stored on the form
                 (defaults to the empty string).
    """
    self.user = kwargs.pop('user', '')
    super(NewPartnerForm, self).__init__(*args, **kwargs)
    # add location fields to form if this is a new contact
    if not self.instance.name:
        # Pull 'notes' out first so it can be re-appended after the
        # location fields, keeping it at the end of the form.
        notes = self.fields.pop('notes')
        self.fields.update(LocationForm().fields)
        self.fields['city'].required = False
        self.fields['state'].required = False
        # move notes field to the end
        self.fields['notes'] = notes
    for field in self.fields.itervalues():
        # primary contact information isn't required to create a partner
        field.required = False
    model_fields = SortedDict(self.fields)
    # Build the new fields as an ordered list of (name, field) pairs.
    # Feeding a plain dict literal to SortedDict (as before) made the
    # relative order of these four fields arbitrary — dict iteration
    # order — defeating the ordering this method exists to guarantee.
    new_fields = [
        ('partnername', forms.CharField(
            label="Partner Organization",
            max_length=255,
            required=True,
            help_text="Name of the Organization",
            widget=forms.TextInput(attrs={
                'placeholder': 'Partner Organization',
                'id': 'id_partner-partnername'
            }))),
        ('partnersource', forms.CharField(
            label="Source",
            max_length=255,
            required=False,
            help_text="Website, event, or other source where you found the partner",
            widget=forms.TextInput(attrs={
                'placeholder': 'Source',
                'id': 'id_partner-partnersource'
            }))),
        ('partnerurl', forms.URLField(
            label="URL",
            max_length=255,
            required=False,
            help_text="Full url. ie http://partnerorganization.org",
            widget=forms.TextInput(attrs={
                'placeholder': 'URL',
                'id': 'id_partner-partnerurl'
            }))),
        ('partner-tags', forms.CharField(
            label='Tags',
            max_length=255,
            required=False,
            help_text="ie 'Disability', 'veteran-outreach', etc. Separate tags with a comma.",
            widget=forms.TextInput(attrs={
                'id': 'p-tags',
                'placeholder': 'Tags'
            }))),
    ]
    # New fields first, then the model-derived fields.
    ordered_fields = SortedDict(new_fields)
    ordered_fields.update(model_fields)
    self.fields = ordered_fields
    autofocus_input(self, 'partnername')
# Catalogue of supported products for the ask-a-question flow.
#
# Each entry maps a product slug to a config dict with (as far as this
# literal shows):
#   'name'/'subtitle' : lazily-translated display strings
#   'extra_fields'    : extra question-form fields to show for this product
#   'tags'            : tags applied to questions filed against it
#   'product'         : backend product identifier ('' for the dead-end entry)
#   'categories'      : SortedDict of topic slug -> {'name', 'topic', 'tags'}
#   'html'/'deadend'  : only on the final catch-all entry, which renders
#                       static links instead of a question form
# SortedDict preserves declaration order, which presumably drives the
# product listing order in the UI.
products = SortedDict([
    ('desktop', {
        'name': _lazy(u'Firefox'),
        'subtitle': _lazy(u'Web browser for Windows, Mac and Linux'),
        'extra_fields': ['troubleshooting', 'ff_version', 'os', 'plugins'],
        'tags': ['desktop'],
        'product': 'firefox',
        'categories': SortedDict([
            # TODO: Just use the IA topics for this.
            # See bug 979397
            ('download-and-install', {
                'name': _lazy(u'Download, install and migration'),
                'topic': 'download-and-install',
                'tags': ['download-and-install'],
            }),
            ('privacy-and-security', {
                'name': _lazy(u'Privacy and security settings'),
                'topic': 'privacy-and-security',
                'tags': ['privacy-and-security'],
            }),
            ('customize', {
                'name': _lazy(u'Customize controls, options and add-ons'),
                'topic': 'customize',
                'tags': ['customize'],
            }),
            ('fix-problems', {
                'name': _lazy(u'Fix slowness, crashing, error messages and '
                              u'other problems'),
                'topic': 'fix-problems',
                'tags': ['fix-problems'],
            }),
            ('tips', {
                'name': _lazy(u'Tips and tricks'),
                'topic': 'tips',
                'tags': ['tips'],
            }),
            ('bookmarks', {
                'name': _lazy(u'Bookmarks'),
                'topic': 'bookmarks',
                'tags': ['bookmarks'],
            }),
            ('cookies', {
                'name': _lazy(u'Cookies'),
                'topic': 'cookies',
                'tags': ['cookies'],
            }),
            ('tabs', {
                'name': _lazy(u'Tabs'),
                'topic': 'tabs',
                'tags': ['tabs'],
            }),
            ('websites', {
                'name': _lazy(u'Websites'),
                'topic': 'websites',
                'tags': ['websites'],
            }),
            ('sync', {
                'name': _lazy(u'Firefox Sync'),
                'topic': 'sync',
                'tags': ['sync'],
            }),
            ('other', {
                'name': _lazy(u'Other'),
                'topic': 'other',
                'tags': ['other'],
            }),
        ])
    }),
    # NOTE(review): the 'mobile' category list duplicates 'desktop'
    # entry-for-entry; the TODO above (bug 979397) presumably covers
    # de-duplicating this.
    ('mobile', {
        'name': _lazy(u'Firefox for Android'),
        'subtitle': _lazy(u'Web browser for Android smartphones and tablets'),
        'extra_fields': ['ff_version', 'os', 'plugins'],
        'tags': ['mobile'],
        'product': 'mobile',
        'categories': SortedDict([
            # TODO: Just use the IA topics for this.
            # See bug 979397
            ('download-and-install', {
                'name': _lazy(u'Download, install and migration'),
                'topic': 'download-and-install',
                'tags': ['download-and-install'],
            }),
            ('privacy-and-security', {
                'name': _lazy(u'Privacy and security settings'),
                'topic': 'privacy-and-security',
                'tags': ['privacy-and-security'],
            }),
            ('customize', {
                'name': _lazy(u'Customize controls, options and add-ons'),
                'topic': 'customize',
                'tags': ['customize'],
            }),
            ('fix-problems', {
                'name': _lazy(u'Fix slowness, crashing, error messages and '
                              u'other problems'),
                'topic': 'fix-problems',
                'tags': ['fix-problems'],
            }),
            ('tips', {
                'name': _lazy(u'Tips and tricks'),
                'topic': 'tips',
                'tags': ['tips'],
            }),
            ('bookmarks', {
                'name': _lazy(u'Bookmarks'),
                'topic': 'bookmarks',
                'tags': ['bookmarks'],
            }),
            ('cookies', {
                'name': _lazy(u'Cookies'),
                'topic': 'cookies',
                'tags': ['cookies'],
            }),
            ('tabs', {
                'name': _lazy(u'Tabs'),
                'topic': 'tabs',
                'tags': ['tabs'],
            }),
            ('websites', {
                'name': _lazy(u'Websites'),
                'topic': 'websites',
                'tags': ['websites'],
            }),
            ('sync', {
                'name': _lazy(u'Firefox Sync'),
                'topic': 'sync',
                'tags': ['sync'],
            }),
            ('other', {
                'name': _lazy(u'Other'),
                'topic': 'other',
                'tags': ['other'],
            }),
        ])
    }),
    ('ios', {
        'name': _lazy(u'Firefox for iOS'),
        'subtitle': _lazy(u'Firefox for iPhone, iPad and iPod touch devices'),
        'extra_fields': ['ff_version', 'os', 'plugins'],
        'tags': ['ios'],
        'product': 'ios',
        'categories': SortedDict([
            ('install-and-update-firefox-ios', {
                'name': _lazy(u'Install and Update'),
                'topic': 'install-and-update-firefox-ios',
                'tags': ['install-and-update-firefox-ios']
            }),
            ('how-to-use-firefox-ios', {
                'name': _lazy(u'How to use Firefox for iOS'),
                'topic': 'how-to-use-firefox-ios',
                'tags': ['how-to-use-firefox-ios']
            }),
            ('firefox-ios-not-working-expected', {
                'name': _lazy(u'Firefox for iOS is not working as expected'),
                'topic': 'firefox-ios-not-working-expected',
                'tags': ['firefox-ios-not-working-expected']
            }),
        ])
    }),
    ('focus', {
        'name': _lazy(u'Firefox Focus'),
        'subtitle': _lazy(u'Automatic privacy browser and content blocker'),
        'extra_fields': [],
        'tags': ['focus-firefox'],
        'product': 'focus-firefox',
        'categories': SortedDict([
            # NOTE(review): 'Focus-ios' is the only capitalised slug in
            # this file — looks accidental, but topics/tags must match
            # the backend taxonomy, so confirm before normalising.
            ('Focus-ios', {
                'name': _lazy(u'Firefox Focus for iOS'),
                'topic': 'Focus-ios',
                'tags': ['Focus-ios']
            }),
            ('firefox-focus-android', {
                'name': _lazy(u'Firefox Focus for Android'),
                'topic': 'firefox-focus-android',
                'tags': ['firefox-focus-android']
            }),
        ])
    }),
    ('firefox-amazon-devices', {
        'name': _lazy(u'Firefox for Amazon Devices'),
        'subtitle': _lazy(u'Browser for Amazon devices'),
        'extra_fields': [],
        'tags': ['firefox-amazon'],
        'product': 'firefox-amazon-devices',
        'categories': SortedDict([
            ('firefox-fire-tv', {
                'name': _lazy(u'Firefox for Fire TV'),
                'topic': 'firefox-fire-tv',
                'tags': ['firefox-fire-tv']
            }),
            ('firefox-echo-show', {
                'name': _lazy(u'Firefox for Echo Show'),
                'topic': 'firefox-echo-show',
                'tags': ['firefox-echo-show']
            }),
        ])
    }),
    # Thunderbird's topic slugs differ from its category keys (e.g.
    # 'download-and-install' -> 'download-install-and-migration').
    ('thunderbird', {
        'name': _lazy(u'Thunderbird'),
        'subtitle': _lazy(u'Email software for Windows, Mac and Linux'),
        'extra_fields': [],
        'tags': [],
        'product': 'thunderbird',
        'categories': SortedDict([
            # TODO: Just use the IA topics for this.
            # See bug 979397
            ('download-and-install', {
                'name': _lazy(u'Download, install and migration'),
                'topic': 'download-install-and-migration',
                'tags': ['download-and-install'],
            }),
            ('privacy-and-security', {
                'name': _lazy(u'Privacy and security settings'),
                'topic': 'privacy-and-security-settings',
                'tags': ['privacy-and-security'],
            }),
            ('customize', {
                'name': _lazy(u'Customize controls, options and add-ons'),
                'topic': 'customize-controls-options-and-add-ons',
                'tags': ['customize'],
            }),
            ('fix-problems', {
                'name': _lazy(u'Fix slowness, crashing, error messages and '
                              u'other problems'),
                'topic': 'fix-slowness-crashing-error-messages-and-other-'
                         'problems',
                'tags': ['fix-problems'],
            }),
            ('calendar', {
                'name': _lazy(u'Calendar'),
                'topic': 'calendar',
                'tags': ['calendar'],
            }),
            ('other', {
                'name': _lazy(u'Other'),
                'topic': 'other',
                'tags': ['other'],
            }),
        ])
    }),
    ('firefox-lite', {
        'name': _lazy(u'Firefox Lite'),
        'subtitle': _lazy(u'Mobile browser for Indonesia'),
        'extra_fields': [],
        'tags': ['firefox-lite'],
        'product': 'firefox-lite',
        'categories': SortedDict([
            ('get-started', {
                'name': _lazy(u'Get started'),
                'topic': 'get-started',
                'tags': ['get-started']
            }),
            ('fix-problems', {
                'name': _lazy(u'Fix problems'),
                'topic': 'fix-problems',
                'tags': ['fix-problems']
            }),
        ])
    }),
    ('webmaker', {
        'name': _lazy(u'Webmaker'),
        'subtitle': _lazy(u'Tools for creating and teaching the web'),
        'extra_fields': [],
        'tags': [],
        'product': 'webmaker',
        'categories': SortedDict([
            # TODO: Just use the IA topics for this.
            # See bug 979397
            ('popcorn-maker', {
                'name': _lazy(u'Using Popcorn Maker'),
                'topic': 'popcorn-maker',
                'tags': ['popcorn-maker'],
            }),
            ('thimble', {
                'name': _lazy(u'Using Thimble'),
                'topic': 'thimble',
                'tags': ['thimble'],
            }),
            ('x-ray-goggles', {
                'name': _lazy(u'Using X-Ray Goggles'),
                'topic': 'x-ray-goggles',
                'tags': ['x-ray-goggles'],
            }),
            ('get-the-most-from-webmaker', {
                'name': _lazy(u'Using a feature on webmaker.org'),
                'topic': 'get-the-most-from-webmaker',
                'tags': ['get-the-most-from-webmaker'],
            }),
            ('events-and-help-for-mentors', {
                'name': _lazy(u'Contributing to Webmaker'),
                'topic': 'events-and-help-for-mentors',
                'tags': ['events-and-help-for-mentors'],
            }),
        ])
    }),
    ('firefox-enterprise', {
        'name': _lazy(u'Firefox for Enterprise'),
        'subtitle': _lazy(u'Enterprise version of Firefox'),
        'extra_fields': [],
        'tags': [],
        'product': 'firefox-enterprise',
        'categories': SortedDict([
            ('deployment-firefox-enterprise-environment', {
                'name': _lazy(
                    u'Deployment of Firefox in an enterprise environment'),
                'topic': 'deployment-firefox-enterprise-environment',
                'tags': ['deployment'],
            }),
            ('customization-firefox-enterprise-environment', {
                'name': _lazy(
                    u'Customization of Firefox in an enterprise environment'),
                'topic': 'customization-firefox-enterprise-environment',
                'tags': ['customization'],
            }),
        ])
    }),
    ('firefox-reality', {
        'name': _lazy(u'Firefox Reality'),
        'subtitle': _lazy(u'Firefox for Virtual Reality'),
        'extra_fields': [],
        'tags': [],
        'product': 'firefox-reality',
        'categories': SortedDict([
            ('get-started', {
                'name': _lazy(u'Get started with Firefox Reality'),
                'topic': 'get-started',
                'tags': ['get-started'],
            }),
            ('troubleshooting-reality', {
                'name': _lazy(u'Troubleshooting Firefox Reality'),
                'topic': 'troubleshooting-reality',
                'tags': ['troubleshooting'],
            }),
        ])
    }),
    # Dead-end entry: no question form is offered; static HTML with
    # links to external support resources is rendered instead.
    ('other', {
        'name': _lazy(u'Other Mozilla products'),
        'subtitle': '',
        'product': '',
        'html': _lazy(u'This site only provides support for some of our products. '
                      u'For other support, please find your product below.'
                      u'<ul class="product-support">'
                      u'<li><a href="http://www.seamonkey-project.org/doc/">'
                      u'SeaMonkey support</a></li>'
                      u'<li><a '
                      u'href="/questions/new/thunderbird">'
                      u'Lightning support</a></li>'
                      u'</ul>'),
        'categories': SortedDict([]),
        'deadend': True,
    }),
])
def get_actions(self):
    """Build the ordered mapping of review actions available for this app.

    Returns a SortedDict mapping an action key (e.g. 'public', 'reject')
    to a descriptor dict with:
      'method'  : bound handler method that performs the action
      'label'   : translated button label
      'minimal' : whether the action is available in the minimal UI
      'details' : translated help text shown to the reviewer
    Which actions are included depends on the app's packaging, status,
    file statuses, queue membership and the reviewer's permissions;
    SortedDict preserves insertion order, which is the display order.
    """
    # Descriptors for every possible action; filtered below.
    public = {
        'method': self.handler.process_public,
        'minimal': False,
        'label': _lazy(u'Push to public'),
        'details': _lazy(u'This will approve the sandboxed app so it '
                         u'appears on the public side.')
    }
    reject = {
        'method': self.handler.process_sandbox,
        'label': _lazy(u'Reject'),
        'minimal': False,
        'details': _lazy(u'This will reject the app and remove it from '
                         u'the review queue.')
    }
    info = {
        'method': self.handler.request_information,
        'label': _lazy(u'Request more information'),
        'minimal': True,
        'details': _lazy(u'This will send the author(s) an email '
                         u'requesting more information.')
    }
    escalate = {
        'method': self.handler.process_escalate,
        'label': _lazy(u'Escalate'),
        'minimal': True,
        'details': _lazy(u'Flag this app for an admin to review.')
    }
    comment = {
        'method': self.handler.process_comment,
        'label': _lazy(u'Comment'),
        'minimal': True,
        'details': _lazy(u'Make a comment on this app. The author won\'t '
                         u'be able to see this.')
    }
    clear_escalation = {
        'method': self.handler.process_clear_escalation,
        'label': _lazy(u'Clear Escalation'),
        'minimal': True,
        'details': _lazy(u'Clear this app from the escalation queue. The '
                         u'author will get no email or see comments '
                         u'here.')
    }
    clear_rereview = {
        'method': self.handler.process_clear_rereview,
        'label': _lazy(u'Clear Re-review'),
        'minimal': True,
        'details': _lazy(u'Clear this app from the re-review queue. The '
                         u'author will get no email or see comments '
                         u'here.')
    }
    disable = {
        'method': self.handler.process_disable,
        'label': _lazy(u'Disable app'),
        'minimal': True,
        'details': _lazy(u'Disable the app, removing it from public '
                         u'results. Sends comments to author.')
    }

    actions = SortedDict()

    # Statuses of this version's files, and whether any *other* version
    # of the same add-on has already been reviewed.
    file_status = self.version.files.values_list('status', flat=True)
    multiple_versions = (File.objects.exclude(version=self.version).filter(
        version__addon=self.addon,
        status__in=amo.REVIEWED_STATUSES).exists())

    # Public: offered unless the app (hosted) or this version's files
    # (packaged) are already public.
    if ((self.addon.is_packaged and amo.STATUS_PUBLIC not in file_status)
            or (not self.addon.is_packaged
                and self.addon.status != amo.STATUS_PUBLIC)):
        actions['public'] = public

    # Reject.
    if self.addon.is_packaged:
        # Packaged apps reject the file only, or the app itself if there's
        # only a single version.
        if (not multiple_versions and self.addon.status not in
                [amo.STATUS_REJECTED, amo.STATUS_DISABLED]):
            actions['reject'] = reject
        elif multiple_versions and amo.STATUS_DISABLED not in file_status:
            actions['reject'] = reject
    else:
        # Hosted apps reject the app itself.
        if self.addon.status not in [
                amo.STATUS_REJECTED, amo.STATUS_DISABLED
        ]:
            actions['reject'] = reject

    # Disable: requires the Addons:Edit permission and something left
    # to disable (app or at least one file not already disabled).
    if (acl.action_allowed(self.handler.request, 'Addons', 'Edit')
            and (self.addon.status != amo.STATUS_DISABLED
                 or amo.STATUS_DISABLED not in file_status)):
        actions['disable'] = disable

    # Clear escalation: only while the app sits in the escalation queue.
    if self.handler.in_escalate:
        actions['clear_escalation'] = clear_escalation

    # Clear re-review: only while the app sits in the re-review queue.
    if self.handler.in_rereview:
        actions['clear_rereview'] = clear_rereview

    # Escalate: only when not already escalated.
    if not self.handler.in_escalate:
        actions['escalate'] = escalate

    # Request info and comment are always shown.
    actions['info'] = info
    actions['comment'] = comment

    return actions
class CouchdbkitHandler(object): """ The couchdbkit handler for django """ # share state between instances __shared_state__ = dict(_databases={}, app_schema=SortedDict()) def __init__(self, databases): """ initialize couchdbkit handler with COUCHDB_DATABASES settings """ self.__dict__ = self.__shared_state__ # Convert old style to new style if isinstance(databases, (list, tuple)): databases = dict((app_name, { 'URL': uri }) for app_name, uri in databases) # create databases sessions for app_name, app_setting in databases.iteritems(): uri = app_setting['URL'] # Do not send credentials when they are both None as admin party will give a 401 user = app_setting.get('USER') password = app_setting.get('PASSWORD') filters = [BasicAuth(user, password) ] if (user or password) is not None else [] try: if isinstance(uri, (list, tuple)): # case when you want to specify server uri # and database name specifically. usefull # when you proxy couchdb on some path server_uri, dbname = uri else: server_uri, dbname = uri.rsplit("/", 1) except ValueError: raise ValueError("couchdb uri [%s:%s] invalid" % (app_name, uri)) res = CouchdbResource(server_uri, timeout=COUCHDB_TIMEOUT, filters=filters) server = Server(server_uri, resource_instance=res) app_label = app_name.split('.')[-1] self._databases[app_label] = (server, dbname) def sync(self, app, verbosity=2, temp=None): """ used to sync views of all applications and eventually create database. When temp is specified, it is appended to the app's name on the docid. 
It can then be updated in the background and copied over the existing design docs to reduce blocking time of view updates """ app_name = app.__name__.rsplit('.', 1)[0] app_labels = set() schema_list = self.app_schema.values() for schema_dict in schema_list: for schema in schema_dict.values(): app_module = schema.__module__.rsplit(".", 1)[0] if app_module == app_name and not schema._meta.app_label in app_labels: app_labels.add(schema._meta.app_label) for app_label in app_labels: if not app_label in self._databases: continue if verbosity >= 1: print "sync `%s` in CouchDB" % app_name db = self.get_db(app_label) app_path = os.path.abspath( os.path.join(sys.modules[app.__name__].__file__, "..")) design_path = "%s/%s" % (app_path, "_design") if not os.path.isdir(design_path): if settings.DEBUG: print >> sys.stderr, "%s don't exists, no ddoc synchronized" % design_path return if temp: design_name = '%s-%s' % (app_label, temp) else: design_name = app_label docid = "_design/%s" % design_name push(os.path.join(app_path, "_design"), db, force=True, docid=docid) if temp: ddoc = db[docid] view_names = ddoc.get('views', {}).keys() if len(view_names) > 0: if verbosity >= 1: print 'Triggering view rebuild' view = '%s/%s' % (design_name, view_names[0]) list(db.view(view, limit=0)) def copy_designs(self, app, temp, verbosity=2, delete=True): """ Copies temporary view over the existing ones This is used to reduce the waiting time for blocking view updates """ app_name = app.__name__.rsplit('.', 1)[0] app_labels = set() schema_list = self.app_schema.values() for schema_dict in schema_list: for schema in schema_dict.values(): app_module = schema.__module__.rsplit(".", 1)[0] if app_module == app_name and not schema._meta.app_label in app_labels: app_labels.add(schema._meta.app_label) for app_label in app_labels: if not app_label in self._databases: continue if verbosity >= 1: print "Copy prepared design docs for `%s`" % app_name db = self.get_db(app_label) tmp_name = '%s-%s' % 
(app_label, temp) from_id = '_design/%s' % tmp_name to_id = '_design/%s' % app_label try: db.copy_doc(from_id, to_id) if delete: del db[from_id] except ResourceNotFound: print '%s not found.' % (from_id, ) return def get_db(self, app_label, register=False): """ retrieve db session for a django application """ if register: return db = self._databases[app_label] if isinstance(db, tuple): server, dbname = db db = server.get_or_create_db(dbname) self._databases[app_label] = db return db def register_schema(self, app_label, *schema): """ register a Document object""" for s in schema: schema_name = schema[0].__name__.lower() schema_dict = self.app_schema.setdefault(app_label, SortedDict()) if schema_name in schema_dict: fname1 = os.path.abspath(sys.modules[s.__module__].__file__) fname2 = os.path.abspath( sys.modules[schema_dict[schema_name].__module__].__file__) if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]: continue schema_dict[schema_name] = s def get_schema(self, app_label, schema_name): """ retriev Document object from its name and app name """ return self.app_schema.get(app_label, SortedDict()).get(schema_name.lower())
def handle_noargs(self, *app_labels, **options):
    """Serialize (dump) model data for the given apps/models to stdout.

    A modified ``dumpdata``: in addition to the standard options it
    reads a ``user`` option and restricts every queryset to rows whose
    ``owner_id`` matches that user's id.

    :param app_labels: 'app_label' or 'app_label.ModelName' specifiers;
                       empty means all installed apps (minus excludes).
    :raises CommandError: on unknown app/model/format, misuse of --pks,
                          or any serialization failure.
    """
    from django.db.models import get_app, get_apps, get_model

    format = options.get('format')
    indent = options.get('indent')
    using = options.get('database')
    excludes = options.get('exclude')
    show_traceback = options.get('traceback')
    use_natural_keys = options.get('use_natural_keys')
    use_base_manager = options.get('use_base_manager')
    pks = options.get('primary_keys')
    # NOTE(review): assumes 'user' is always supplied and non-None —
    # a missing user crashes here with AttributeError. Confirm callers.
    user = options.get('user')
    userid = user.id
    stdout = OutputWrapper(options.get('stdout', sys.stdout))

    if pks:
        primary_keys = pks.split(',')
    else:
        primary_keys = []

    # Resolve --exclude entries into excluded apps and excluded models.
    excluded_apps = set()
    excluded_models = set()
    if excludes:
        for exclude in excludes:
            if '.' in exclude:
                app_label, model_name = exclude.split('.', 1)
                model_obj = get_model(app_label, model_name)
                if not model_obj:
                    raise CommandError('Unknown model in excludes: %s' %
                                       exclude)
                excluded_models.add(model_obj)
            else:
                try:
                    app_obj = get_app(exclude)
                    excluded_apps.add(app_obj)
                except ImproperlyConfigured:
                    raise CommandError('Unknown app in excludes: %s' %
                                       exclude)

    # Build app_list: app -> list of models, or None meaning "all models
    # of that app". SortedDict keeps the user-specified ordering.
    if len(app_labels) == 0:
        if primary_keys:
            raise CommandError(
                "You can only use --pks option with one model")
        app_list = SortedDict(
            (app, None) for app in get_apps() if app not in excluded_apps)
    else:
        if len(app_labels) > 1 and primary_keys:
            raise CommandError(
                "You can only use --pks option with one model")
        app_list = SortedDict()
        for label in app_labels:
            try:
                app_label, model_label = label.split('.')
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" %
                                       app_label)
                if app in excluded_apps:
                    continue
                model = get_model(app_label, model_label)
                if model is None:
                    raise CommandError("Unknown model: %s.%s" %
                                       (app_label, model_label))

                if app in app_list.keys():
                    if app_list[app] and model not in app_list[app]:
                        app_list[app].append(model)
                else:
                    app_list[app] = [model]
            except ValueError:
                # label had no '.': this is just an app, no model qualifier.
                if primary_keys:
                    raise CommandError(
                        "You can only use --pks option with one model")
                app_label = label
                try:
                    app = get_app(app_label)
                except ImproperlyConfigured:
                    raise CommandError("Unknown application: %s" %
                                       app_label)
                if app in excluded_apps:
                    continue
                app_list[app] = None

    # Check that the serialization format exists; this is a shortcut to
    # avoid collating all the objects and _then_ failing.
    if format not in serializers.get_public_serializer_formats():
        try:
            serializers.get_serializer(format)
        except serializers.SerializerDoesNotExist:
            pass
        raise CommandError("Unknown serialization format: %s" % format)

    def get_objects():
        # Collate the objects to be serialized, in dependency order so
        # that foreign-key targets are dumped before their referrers.
        for model in sort_dependencies(app_list.items()):
            if model in excluded_models:
                continue
            if not model._meta.proxy and router.allow_syncdb(using, model):
                if use_base_manager:
                    objects = model._base_manager
                else:
                    objects = model._default_manager

                queryset = objects.using(using).order_by(
                    model._meta.pk.name)
                if primary_keys:
                    queryset = queryset.filter(pk__in=primary_keys)
                # Restrict the dump to the requesting user's own rows.
                queryset = queryset.filter(owner_id=userid)
                for obj in queryset.iterator():
                    yield obj

    try:
        # Disable the trailing newline the wrapper would otherwise add.
        stdout.ending = None
        serializers.serialize(format,
                              get_objects(),
                              indent=indent,
                              use_natural_keys=use_natural_keys,
                              stream=stdout)
    except Exception as e:
        if show_traceback:
            raise
        raise CommandError("Unable to serialize database: %s" % e)
def process_response(self, request, response):
    """Record the safe settings, sorted alphabetically by name, into stats."""
    safe_settings = get_safe_settings()
    sorted_items = sorted(safe_settings.items(), key=lambda item: item[0])
    self.record_stats({'settings': SortedDict(sorted_items)})
instances = [] LOG.exception(_('Exception in instance index')) messages.error(request, _('Unable to fetch instances: %s') % e.message) # Gather our volumes try: volumes = api.volume_list(request) except engineclient_exceptions.ClientException, e: volumes = [] LOG.exception("ClientException in volume index") messages.error(request, _('Unable to fetch volumes: %s') % e.message) # Gather our flavors and correlate our instances to them try: flavors = api.flavor_list(request) full_flavors = SortedDict([(str(flavor.id), flavor) for \ flavor in flavors]) for instance in instances: instance.full_flavor = full_flavors[instance.flavor["id"]] except api_exceptions.Unauthorized, e: LOG.exception('Unauthorized attempt to access flavor list.') messages.error(request, _('Unauthorized.')) except Exception, e: if not hasattr(e, 'message'): e.message = str(e) LOG.exception('Exception while fetching flavor info') messages.error(request, _('Unable to get flavor info: %s') % e.message) terminate_form = TerminateInstance() reboot_form = RebootInstance() delete_form = DeleteForm() detach_form = DetachForm()
def get_element_permissions(element, cls):
    """Return the permission mapping for `element`, ordered by element name."""
    perms = SortedDict(get_elements_with_perms_cls(element, cls, True))
    # SortedDict tracks ordering via keyOrder; sort it by each key's name.
    perms.keyOrder.sort(key=lambda entry: entry.get_name())
    return perms
def get_schema(self, app_label, schema_name):
    """Retrieve a registered Document class by app label and
    (case-insensitive) schema name; returns None when unknown."""
    schemas_for_app = self.app_schema.get(app_label, SortedDict())
    return schemas_for_app.get(schema_name.lower())
class Options(object): def __init__(self, meta, app_label=None): self.local_fields, self.local_many_to_many = [], [] self.virtual_fields = [] self.module_name, self.verbose_name = None, None self.verbose_name_plural = None self.db_table = '' self.ordering = [] self.unique_together = [] self.permissions = [] self.object_name, self.app_label = None, app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.admin = None self.meta = meta self.pk = None self.has_auto_field, self.auto_field = False, None self.abstract = False self.managed = True self.proxy = False self.proxy_for_model = None self.parents = SortedDict() self.duplicate_targets = {} self.auto_created = False # To handle various inheritance situations, we need to track where # managers came from (concrete or abstract base classes). self.abstract_managers = [] self.concrete_managers = [] # List of all lookups defined in ForeignKey 'limit_choices_to' options # from *other* models. Needed for some admin checks. Internal use only. self.related_fkey_lookups = [] def contribute_to_class(self, cls, name): from django.db import connection from django.db.backends.util import truncate_name cls._meta = self self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS # First, construct the default values for these options. self.object_name = cls.__name__ self.module_name = self.object_name.lower() self.verbose_name = get_verbose_name(self.object_name) # Next, apply any overridden values from 'class Meta'. if self.meta: meta_attrs = self.meta.__dict__.copy() for name in self.meta.__dict__: # Ignore any private attributes that Django doesn't care about. # NOTE: We can't modify a dictionary's contents while looping # over it, so we loop over the *original* dictionary instead. 
if name.startswith('_'): del meta_attrs[name] for attr_name in DEFAULT_NAMES: if attr_name in meta_attrs: setattr(self, attr_name, meta_attrs.pop(attr_name)) elif hasattr(self.meta, attr_name): setattr(self, attr_name, getattr(self.meta, attr_name)) # unique_together can be either a tuple of tuples, or a single # tuple of two strings. Normalize it to a tuple of tuples, so that # calling code can uniformly expect that. ut = meta_attrs.pop('unique_together', self.unique_together) if ut and not isinstance(ut[0], (tuple, list)): ut = (ut, ) self.unique_together = ut # verbose_name_plural is a special case because it uses a 's' # by default. if self.verbose_name_plural is None: self.verbose_name_plural = string_concat( self.verbose_name, 's') # Any leftover attributes must be invalid. if meta_attrs != {}: raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys())) else: self.verbose_name_plural = string_concat(self.verbose_name, 's') del self.meta # If the db_table wasn't provided, use the app_label + module_name. if not self.db_table: self.db_table = "%s_%s" % (self.app_label, self.module_name) self.db_table = truncate_name(self.db_table, connection.ops.max_name_length()) def _prepare(self, model): if self.order_with_respect_to: self.order_with_respect_to = self.get_field( self.order_with_respect_to) self.ordering = ('_order', ) model.add_to_class('_order', OrderWrt()) else: self.order_with_respect_to = None if self.pk is None: if self.parents: # Promote the first parent link in lieu of adding yet another # field. field = self.parents.value_for_index(0) # Look for a local field with the same name as the # first parent link. 
If a local field has already been # created, use it instead of promoting the parent already_created = [ fld for fld in self.local_fields if fld.name == field.name ] if already_created: field = already_created[0] field.primary_key = True self.setup_pk(field) else: auto = AutoField(verbose_name='ID', primary_key=True, auto_created=True) model.add_to_class('id', auto) # Determine any sets of fields that are pointing to the same targets # (e.g. two ForeignKeys to the same remote model). The query # construction code needs to know this. At the end of this, # self.duplicate_targets will map each duplicate field column to the # columns it duplicates. collections = {} for column, target in self.duplicate_targets.iteritems(): try: collections[target].add(column) except KeyError: collections[target] = set([column]) self.duplicate_targets = {} for elt in collections.itervalues(): if len(elt) == 1: continue for column in elt: self.duplicate_targets[column] = elt.difference(set([column])) def add_field(self, field): # Insert the given field in the order in which it was created, using # the "creation_counter" attribute of the field. # Move many-to-many related fields from self.fields into # self.many_to_many. if field.rel and isinstance(field.rel, ManyToManyRel): self.local_many_to_many.insert( bisect(self.local_many_to_many, field), field) if hasattr(self, '_m2m_cache'): del self._m2m_cache else: self.local_fields.insert(bisect(self.local_fields, field), field) self.setup_pk(field) if hasattr(self, '_field_cache'): del self._field_cache del self._field_name_cache if hasattr(self, '_name_map'): del self._name_map def add_virtual_field(self, field): self.virtual_fields.append(field) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def setup_proxy(self, target): """ Does the internal setup so that the current model is a proxy for "target". 
""" self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return '<Options for %s>' % self.object_name def __str__(self): return "%s.%s" % (smart_str(self.app_label), smart_str( self.module_name)) def verbose_name_raw(self): """ There are a few places where the untranslated verbose name is needed (so that we get the same value regardless of currently active locale). """ lang = get_language() deactivate_all() raw = force_unicode(self.verbose_name) activate(lang) return raw verbose_name_raw = property(verbose_name_raw) def _fields(self): """ The getter for self.fields. This returns the list of field objects available to this model (including through parent models). Callers are not permitted to modify this list, since it's a reference to this instance (not a copy). """ try: self._field_name_cache except AttributeError: self._fill_fields_cache() return self._field_name_cache fields = property(_fields) def get_fields_with_model(self): """ Returns a sequence of (field, model) pairs for all fields. The "model" element is None for fields on the current model. Mostly of use when constructing queries so that we know which model a field belongs to. """ try: self._field_cache except AttributeError: self._fill_fields_cache() return self._field_cache def _fill_fields_cache(self): cache = [] for parent in self.parents: for field, model in parent._meta.get_fields_with_model(): if model: cache.append((field, model)) else: cache.append((field, parent)) cache.extend([(f, None) for f in self.local_fields]) self._field_cache = tuple(cache) self._field_name_cache = [x for x, _ in cache] def _many_to_many(self): try: self._m2m_cache except AttributeError: self._fill_m2m_cache() return self._m2m_cache.keys() many_to_many = property(_many_to_many) def get_m2m_with_model(self): """ The many-to-many version of get_fields_with_model(). 
""" try: self._m2m_cache except AttributeError: self._fill_m2m_cache() return self._m2m_cache.items() def _fill_m2m_cache(self): cache = SortedDict() for parent in self.parents: for field, model in parent._meta.get_m2m_with_model(): if model: cache[field] = model else: cache[field] = parent for field in self.local_many_to_many: cache[field] = None self._m2m_cache = cache def get_field(self, name, many_to_many=True): """ Returns the requested field by name. Raises FieldDoesNotExist on error. """ to_search = many_to_many and (self.fields + self.many_to_many) or self.fields for f in to_search: if f.name == name: return f raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name)) def get_field_by_name(self, name): """ Returns the (field_object, model, direct, m2m), where field_object is the Field instance for the given name, model is the model containing this field (None for local fields), direct is True if the field exists on this model, and m2m is True for many-to-many relations. When 'direct' is False, 'field_object' is the corresponding RelatedObject for this field (since the field doesn't have an instance associated with it). Uses a cache internally, so after the first access, this is very fast. """ try: try: return self._name_map[name] except AttributeError: cache = self.init_name_map() return cache[name] except KeyError: raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name)) def get_all_field_names(self): """ Returns a list of all field names that are possible for this model (including reverse relation names). This is used for pretty printing debugging output (a list of choices), so any internal-only field names are not included. """ try: cache = self._name_map except AttributeError: cache = self.init_name_map() names = cache.keys() names.sort() # Internal-only names end with "+" (symmetrical m2m related names being # the main example). Trim them. 
return [val for val in names if not val.endswith('+')] def init_name_map(self): """ Initialises the field name -> field object mapping. """ cache = {} # We intentionally handle related m2m objects first so that symmetrical # m2m accessor names can be overridden, if necessary. for f, model in self.get_all_related_m2m_objects_with_model(): cache[f.field.related_query_name()] = (f, model, False, True) for f, model in self.get_all_related_objects_with_model(): cache[f.field.related_query_name()] = (f, model, False, False) for f, model in self.get_m2m_with_model(): cache[f.name] = (f, model, True, True) for f, model in self.get_fields_with_model(): cache[f.name] = (f, model, True, False) if app_cache_ready(): self._name_map = cache return cache def get_add_permission(self): return 'add_%s' % self.object_name.lower() def get_change_permission(self): return 'change_%s' % self.object_name.lower() def get_delete_permission(self): return 'delete_%s' % self.object_name.lower() def get_all_related_objects(self, local_only=False, include_hidden=False): return [ k for k, v in self.get_all_related_objects_with_model( local_only=local_only, include_hidden=include_hidden) ] def get_all_related_objects_with_model(self, local_only=False, include_hidden=False): """ Returns a list of (related-object, model) pairs. Similar to get_fields_with_model(). 
""" try: self._related_objects_cache except AttributeError: self._fill_related_objects_cache() predicates = [] if local_only: predicates.append(lambda k, v: not v) if not include_hidden: predicates.append(lambda k, v: not k.field.rel.is_hidden()) return filter(lambda t: all([p(*t) for p in predicates]), self._related_objects_cache.items()) def _fill_related_objects_cache(self): cache = SortedDict() parent_list = self.get_parent_list() for parent in self.parents: for obj, model in parent._meta.get_all_related_objects_with_model( include_hidden=True): if (obj.field.creation_counter < 0 or obj.field.rel.parent_link ) and obj.model not in parent_list: continue if not model: cache[obj] = parent else: cache[obj] = model for klass in get_models(include_auto_created=True): for f in klass._meta.local_fields: if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta: cache[RelatedObject(f.rel.to, klass, f)] = None self._related_objects_cache = cache def get_all_related_many_to_many_objects(self, local_only=False): try: cache = self._related_many_to_many_cache except AttributeError: cache = self._fill_related_many_to_many_cache() if local_only: return [k for k, v in cache.items() if not v] return cache.keys() def get_all_related_m2m_objects_with_model(self): """ Returns a list of (related-m2m-object, model) pairs. Similar to get_fields_with_model(). 
""" try: cache = self._related_many_to_many_cache except AttributeError: cache = self._fill_related_many_to_many_cache() return cache.items() def _fill_related_many_to_many_cache(self): cache = SortedDict() parent_list = self.get_parent_list() for parent in self.parents: for obj, model in parent._meta.get_all_related_m2m_objects_with_model( ): if obj.field.creation_counter < 0 and obj.model not in parent_list: continue if not model: cache[obj] = parent else: cache[obj] = model for klass in get_models(): for f in klass._meta.local_many_to_many: if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta: cache[RelatedObject(f.rel.to, klass, f)] = None if app_cache_ready(): self._related_many_to_many_cache = cache return cache def get_base_chain(self, model): """ Returns a list of parent classes leading to 'model' (order from closet to most distant ancestor). This has to handle the case were 'model' is a granparent or even more distant relation. """ if not self.parents: return if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res raise TypeError('%r is not an ancestor of this model' % model._meta.module_name) def get_parent_list(self): """ Returns a list of all the ancestor of this model as a list. Useful for determining if something is an ancestor, regardless of lineage. """ result = set() for parent in self.parents: result.add(parent) result.update(parent._meta.get_parent_list()) return result def get_ancestor_link(self, ancestor): """ Returns the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Returns None if the model isn't an ancestor of this one. 
""" if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link def get_ordered_objects(self): "Returns a list of Options objects that are ordered with respect to this object." if not hasattr(self, '_ordered_objects'): objects = [] # TODO #for klass in get_models(get_app(self.app_label)): # opts = klass._meta # if opts.order_with_respect_to and opts.order_with_respect_to.rel \ # and self == opts.order_with_respect_to.rel.to._meta: # objects.append(opts) self._ordered_objects = objects return self._ordered_objects def pk_index(self): """ Returns the index of the primary key field in the self.fields list. """ return self.fields.index(self.pk)
def build_changelog(docs_path, package_name="mezzanine"): """ Converts Mercurial commits into a changelog in RST format. """ project_path = os.path.join(docs_path, "..") version_file = os.path.join(package_name, "__init__.py") version_var = "__version__" changelog_filename = "CHANGELOG" changelog_file = os.path.join(project_path, changelog_filename) versions = SortedDict() repo = None ignore = ("AUTHORS", "formatting", "typo", "pep8", "pep 8", "whitespace", "README", "trans", "print debug", "debugging", "tabs", "style", "sites", "ignore", "tweak", "cleanup", "minor", "for changeset") hotfixes = { "40cbc47b8d8a": "1.0.9", "a25749986abc": "1.0.10", } # Load the repo. try: from mercurial import ui, hg, error from mercurial.commands import tag except ImportError: pass else: try: ui = ui.ui() repo = hg.repository(ui, project_path) except error.RepoError: return if repo is None: return # Go through each changeset and assign it to the versions dict. changesets = [repo.changectx(changeset) for changeset in repo.changelog] for cs in sorted(changesets, reverse=True, key=_changeset_date): # Check if the file with the version number is in this changeset # and if it is, pull it out and assign it as a variable. files = cs.files() new_version = False # Commit message cleanup hacks. description = cs.description().rstrip(".").replace("\n", ". ") while " " in description: description = description.replace(" ", " ") description = description.replace(". . ", ". ").replace("...", ",") while ".." in description: description = description.replace("..", ".") description = description.replace(":.", ":").replace("n'. t", "n't") words = description.split() # Format var names in commit. 
for i, word in enumerate(words): if (set("._") & set(word[:-1]) and set(letters) & set(word) and "`" not in word and not word[0].isdigit()): last = "" if word[-1] in ",.": last, word = word[-1], word[:-1] words[i] = "``%s``%s" % (word, last) description = " ".join(words) if version_file in files: for line in cs[version_file].data().split("\n"): if line.startswith(version_var): exec line if locals()[version_var] == "0.1.0": locals()[version_var] = "1.0.0" break versions[locals()[version_var]] = { "changes": [], "date": _changeset_date(cs).strftime("%b %d, %Y") } new_version = len(files) == 1 # Tag new versions. hotfix = hotfixes.get(cs.hex()[:12]) if hotfix or new_version: if hotfix: version_tag = hotfix else: try: version_tag = locals()[version_var] except KeyError: version_tag = None if version_tag and version_tag not in cs.tags(): print "Tagging version %s" % version_tag tag(ui, repo, version_tag, rev=cs.hex()) # Ignore changesets that are merges, bumped the version, closed # a branch, regenerated the changelog itself, contain an ignore # word, or are one word long. merge = len(cs.parents()) > 1 branch_closed = len(files) == 0 changelog_update = changelog_filename in files ignored = [w for w in ignore if w.lower() in description.lower()] one_word = len(description.split()) == 1 if (merge or new_version or branch_closed or changelog_update or ignored or one_word): continue # Ensure we have a current version and if so, add this changeset's # description to it. version = None try: version = locals()[version_var] except KeyError: if not hotfix: continue user = cs.user().split("<")[0].strip() entry = "%s - %s" % (description, user) if hotfix or entry not in versions[version]["changes"]: if hotfix: versions[hotfix] = { "changes": [entry], "date": _changeset_date(cs).strftime("%b %d, %Y"), } else: versions[version]["changes"].insert(0, entry) # Write out the changelog. 
with open(changelog_file, "w") as f: for version, version_info in versions.items(): header = "Version %s (%s)" % (version, version_info["date"]) f.write("%s\n" % header) f.write("%s\n" % ("-" * len(header))) f.write("\n") if version_info["changes"]: for change in version_info["changes"]: f.write(" * %s\n" % change) else: f.write(" * No changes listed.\n") f.write("\n")
def reset(self): self.indexes = {} self.fields = SortedDict() self._built = False self._fieldnames = {} self._facet_fieldnames = {}
def subform(self, fieldset=()): form = deepcopy(self) form.fields = SortedDict([(key, self.fields[key]) for key in fieldset]) return form
def get_price(self, product, currency, **kwargs): from satchless.pricing.handler import get_product_price_range min_price, max_price = get_product_price_range(product, currency, **kwargs) if min_price is not None and min_price.has_value(): return SortedDict((('min', min_price), ('max', max_price)))
class SortedDictTests(IgnorePendingDeprecationWarningsMixin, SimpleTestCase):
    """
    Exercises the insertion-order contract of SortedDict: iteration order,
    overwrite/delete/reinsert semantics, copying, pickling and repr.
    """

    def setUp(self):
        super(SortedDictTests, self).setUp()
        # d1: three keys inserted 7, 1, 9.
        self.d1 = SortedDict()
        self.d1[7] = 'seven'
        self.d1[1] = 'one'
        self.d1[9] = 'nine'
        # d2: four keys inserted 1, 9, 0, 7.
        self.d2 = SortedDict()
        self.d2[1] = 'one'
        self.d2[9] = 'nine'
        self.d2[0] = 'nil'
        self.d2[7] = 'seven'

    def test_basic_methods(self):
        # Keys/values/items all iterate in insertion order.
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9])
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'one', 'nine'])
        self.assertEqual(list(six.iteritems(self.d1)),
                         [(7, 'seven'), (1, 'one'), (9, 'nine')])

    def test_overwrite_ordering(self):
        """ Overwriting an item keeps its place. """
        self.d1[1] = 'ONE'
        self.assertEqual(list(six.itervalues(self.d1)), ['seven', 'ONE', 'nine'])

    def test_append_items(self):
        """ New items go to the end. """
        self.d1[0] = 'nil'
        self.assertEqual(list(six.iterkeys(self.d1)), [7, 1, 9, 0])

    def test_delete_and_insert(self):
        """
        Deleting an item, then inserting the same key again will place it
        at the end.
        """
        del self.d2[7]
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0])
        self.d2[7] = 'lucky number 7'
        self.assertEqual(list(six.iterkeys(self.d2)), [1, 9, 0, 7])

    if six.PY2:
        def test_change_keys(self):
            """
            Changing the keys won't do anything, it's only a copy of the
            keys dict.

            This test doesn't make sense under Python 3 because keys is
            an iterator.
            """
            k = self.d2.keys()
            k.remove(9)
            self.assertEqual(self.d2.keys(), [1, 9, 0, 7])

    def test_init_keys(self):
        """
        Initialising a SortedDict with two keys will just take the first one.

        A real dict will actually take the second value so we will too, but
        we'll keep the ordering from the first key found.
        """
        tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
        d = SortedDict(tuples)
        self.assertEqual(list(six.iterkeys(d)), [2, 1])
        real_dict = dict(tuples)
        self.assertEqual(sorted(six.itervalues(real_dict)), ['one', 'second-two'])
        # Here the order of SortedDict values *is* what we are testing
        self.assertEqual(list(six.itervalues(d)), ['second-two', 'one'])

    def test_overwrite(self):
        self.d1[1] = 'not one'
        self.assertEqual(self.d1[1], 'not one')
        # A copy must reproduce the original key order.
        self.assertEqual(list(six.iterkeys(self.d1)),
                         list(six.iterkeys(self.d1.copy())))

    def test_append(self):
        # repr reflects insertion order too.
        self.d1[13] = 'thirteen'
        self.assertEqual(repr(self.d1),
                         "{7: 'seven', 1: 'one', 9: 'nine', 13: 'thirteen'}")

    def test_pop(self):
        self.assertEqual(self.d1.pop(1, 'missing'), 'one')
        self.assertEqual(self.d1.pop(1, 'missing'), 'missing')
        # We don't know which item will be popped in popitem(), so we'll
        # just check that the number of keys has decreased.
        l = len(self.d1)
        self.d1.popitem()
        self.assertEqual(l - len(self.d1), 1)

    def test_dict_equality(self):
        # Equality with a plain dict ignores ordering.
        d = SortedDict((i, i) for i in range(3))
        self.assertEqual(d, {0: 0, 1: 1, 2: 2})

    def test_tuple_init(self):
        d = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        self.assertEqual(repr(d), "{1: 'one', 0: 'zero', 2: 'two'}")

    def test_pickle(self):
        # Round-trips through pickle protocol 2 preserving contents.
        self.assertEqual(pickle.loads(pickle.dumps(self.d1, 2)),
                         {7: 'seven', 1: 'one', 9: 'nine'})

    def test_copy(self):
        # copy.copy keeps key order on both the original and the copy.
        orig = SortedDict(((1, "one"), (0, "zero"), (2, "two")))
        copied = copy.copy(orig)
        self.assertEqual(list(six.iterkeys(orig)), [1, 0, 2])
        self.assertEqual(list(six.iterkeys(copied)), [1, 0, 2])

    def test_clear(self):
        self.d1.clear()
        self.assertEqual(self.d1, {})
        # keyOrder is SortedDict's internal ordering list; it must be
        # emptied too.
        self.assertEqual(self.d1.keyOrder, [])

    def test_reversed(self):
        self.assertEqual(list(self.d1), [7, 1, 9])
        self.assertEqual(list(self.d2), [1, 9, 0, 7])
        self.assertEqual(list(reversed(self.d1)), [9, 1, 7])
        self.assertEqual(list(reversed(self.d2)), [7, 0, 9, 1])
def safe_summary(self, encoded): return SortedDict([ (_('algorithm'), self.algorithm), (_('hash'), mask_hash(encoded, show=3)), ])
class UnifiedIndex(object):
    # Used to collect all the indexes into a cohesive whole.
    def __init__(self, excluded_indexes=None):
        self.indexes = {}            # model -> SearchIndex instance
        self.fields = SortedDict()   # index_fieldname -> field object (merged schema)
        self._built = False
        self._indexes_setup = False
        self.excluded_indexes = excluded_indexes or []
        self.excluded_indexes_ids = {}
        self.document_field = getattr(settings, 'HAYSTACK_DOCUMENT_FIELD', 'text')
        self._fieldnames = {}        # fieldname -> index_fieldname
        self._facet_fieldnames = {}  # faceted source field -> fieldname

    def collect_indexes(self):
        # Scan every installed app's search_indexes module for index
        # classes, instantiating each one found (unless excluded).
        indexes = []
        for app in settings.INSTALLED_APPS:
            mod = importlib.import_module(app)
            try:
                search_index_module = importlib.import_module("%s.search_indexes" % app)
            except ImportError:
                # Re-raise only if the submodule exists but failed to import;
                # a genuinely missing module is fine.
                if module_has_submodule(mod, 'search_indexes'):
                    raise
                continue
            for item_name, item in inspect.getmembers(search_index_module, inspect.isclass):
                if getattr(item, 'haystack_use_for_indexing', False) and getattr(item, 'get_model', None):
                    # We've got an index. Check if we should be ignoring it.
                    class_path = "%s.search_indexes.%s" % (app, item_name)
                    if class_path in self.excluded_indexes or self.excluded_indexes_ids.get(item_name) == id(item):
                        self.excluded_indexes_ids[str(item_name)] = id(item)
                        continue
                    indexes.append(item())
        return indexes

    def reset(self):
        # Drop all collected state so the next access triggers a rebuild.
        self.indexes = {}
        self.fields = SortedDict()
        self._built = False
        self._fieldnames = {}
        self._facet_fieldnames = {}

    def build(self, indexes=None):
        self.reset()
        if indexes is None:
            indexes = self.collect_indexes()
        for index in indexes:
            model = index.get_model()
            if model in self.indexes:
                # Duplicate index for a model: log and abort the build
                # (leaves self._built False).
                error_str = "Model '%s' has more than one 'SearchIndex`` handling it. Please exclude either '%s' or '%s' using the 'HAYSTACK_EXCLUDED_INDEXES' setting." % (model, self.indexes[model], index)
                log.error(error_str)
                return
                #raise ImproperlyConfigured("Model '%s' has more than one 'SearchIndex`` handling it. Please exclude either '%s' or '%s' using the 'HAYSTACK_EXCLUDED_INDEXES' setting."
                #                           % (model, self.indexes[model], index))
            self.indexes[model] = index
            self.collect_fields(index)
        self._built = True

    def collect_fields(self, index):
        # Merge this index's fields into the unified schema, enforcing the
        # shared document-field and index_fieldname constraints.
        for fieldname, field_object in index.fields.items():
            if field_object.document is True:
                if field_object.index_fieldname != self.document_field:
                    raise SearchFieldError("All 'SearchIndex' classes must use the same '%s' fieldname for the 'document=True' field. Offending index is '%s'." % (self.document_field, index))
            # Stow the index_fieldname so we don't have to get it the hard way again.
            if fieldname in self._fieldnames and field_object.index_fieldname != self._fieldnames[fieldname]:
                # We've already seen this field in the list. Raise an exception if index_fieldname differs.
                raise SearchFieldError("All uses of the '%s' field need to use the same 'index_fieldname' attribute." % fieldname)
            self._fieldnames[fieldname] = field_object.index_fieldname
            # Stow the facet_fieldname so we don't have to look that up either.
            if hasattr(field_object, 'facet_for'):
                if field_object.facet_for:
                    self._facet_fieldnames[field_object.facet_for] = fieldname
                else:
                    self._facet_fieldnames[field_object.instance_name] = fieldname
            # Copy the field in so we've got a unified schema.
            if not field_object.index_fieldname in self.fields:
                # NOTE(review): the first assignment is immediately
                # overwritten by the copy on the next line; it is redundant.
                self.fields[field_object.index_fieldname] = field_object
                self.fields[field_object.index_fieldname] = copy.copy(field_object)
            else:
                # If the field types are different, we can mostly
                # safely ignore this. The exception is ``MultiValueField``,
                # in which case we'll use it instead, copying over the
                # values.
                if field_object.is_multivalued == True:
                    old_field = self.fields[field_object.index_fieldname]
                    self.fields[field_object.index_fieldname] = field_object
                    self.fields[field_object.index_fieldname] = copy.copy(field_object)
                    # Switch it so we don't have to dupe the remaining
                    # checks.
                    field_object = old_field
                # We've already got this field in the list. Ensure that
                # what we hand back is a superset of all options that
                # affect the schema.
                if field_object.indexed is True:
                    self.fields[field_object.index_fieldname].indexed = True
                if field_object.stored is True:
                    self.fields[field_object.index_fieldname].stored = True
                if field_object.faceted is True:
                    self.fields[field_object.index_fieldname].faceted = True
                if field_object.use_template is True:
                    self.fields[field_object.index_fieldname].use_template = True
                if field_object.null is True:
                    self.fields[field_object.index_fieldname].null = True

    def setup_indexes(self):
        # Wire up save/delete signal handlers for every index, once.
        if not self._built:
            self.build()
        if self._indexes_setup:
            return
        for model_ct, index in self.indexes.items():
            index._setup_save()
            index._setup_delete()
        self._indexes_setup = True

    def teardown_indexes(self):
        # Disconnect the save/delete signal handlers for every index.
        if not self._built:
            self.build()
        for model_ct, index in self.indexes.items():
            index._teardown_save()
            index._teardown_delete()
        self._indexes_setup = False

    def get_indexed_models(self):
        if not self._built:
            self.build()
        return self.indexes.keys()

    def get_index_fieldname(self, field):
        # Fall back to the name itself when no mapping is registered.
        if not self._built:
            self.build()
        return self._fieldnames.get(field) or field

    def get_index(self, model_klass):
        if not self._built:
            self.build()
        if model_klass not in self.indexes:
            raise NotHandled('The model %s is not registered' % model_klass.__class__)
        return self.indexes[model_klass]

    def get_facet_fieldname(self, field):
        if not self._built:
            self.build()
        for fieldname, field_object in self.fields.items():
            if fieldname != field:
                continue
            if hasattr(field_object, 'facet_for'):
                if field_object.facet_for:
                    return field_object.facet_for
                else:
                    return field_object.instance_name
            else:
                return self._facet_fieldnames.get(field) or field
        return field

    def all_searchfields(self):
        if not self._built:
            self.build()
        return self.fields
def _generate_bound_fields(self): self.bound_fields = SortedDict([(name, BoundField(self, field, name)) for name, field in self.fields.items()])
class Collector(object):
    """
    Accumulates the full set of objects affected by a delete (cascades,
    batch deletes, field updates), then executes the deletion in a safe
    order via delete().
    """

    def __init__(self, using):
        self.using = using                 # database alias
        self.data = {}                     # {model: [instances]}
        self.batches = {}                  # {model: {field: set([instances])}}
        self.field_updates = {}            # {model: {(field, value): set([instances])}}
        self.dependencies = {}             # {model: set([models])}

    def add(self, objs, source=None, nullable=False):
        """
        Adds 'objs' to the collection of objects to be deleted.  If the call is
        the result of a cascade, 'source' should be the model that caused it
        and 'nullable' should be set to True, if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, [])
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.extend(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if new_objs and source is not None and not nullable:
            self.dependencies.setdefault(source, set()).add(model)
        return new_objs

    def add_batch(self, model, field, objs):
        """
        Schedules a batch delete. Every instance of 'model' that is related to
        an instance of 'obj' through 'field' will be deleted.
        """
        self.batches.setdefault(model, {}).setdefault(field, set()).update(objs)

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogenous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(model, {}).setdefault(
            (field, value), set()).update(objs)

    def collect(self, objs, source=None, nullable=False, collect_related=True,
                source_attr=None):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogenous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.
        """
        # Backends that cannot delete related objects get local-only
        # collection.
        if not connections[self.using].features.supports_deleting_related_objects:
            collect_related = False
        new_objs = self.add(objs, source, nullable)
        if not new_objs:
            return
        model = new_objs[0].__class__
        # Recursively collect parent models, but not their related objects.
        # These will be found by meta.get_all_related_objects()
        for parent_model, ptr in model._meta.parents.iteritems():
            if ptr:
                parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                self.collect(parent_objs, source=model,
                             source_attr=ptr.rel.related_name,
                             collect_related=False)
        if collect_related:
            for related in model._meta.get_all_related_objects(include_hidden=True):
                field = related.field
                if related.model._meta.auto_created:
                    # Auto-created (e.g. m2m through) models can be deleted
                    # in bulk without per-object handling.
                    self.add_batch(related.model, field, new_objs)
                else:
                    sub_objs = self.related_objects(related, new_objs)
                    if not sub_objs:
                        continue
                    # Delegate to the relation's on_delete handler
                    # (CASCADE, SET_NULL, PROTECT, ...).
                    field.rel.on_delete(self, field, sub_objs, self.using)
            # TODO This entire block is only needed as a special case to
            # support cascade-deletes for GenericRelation. It should be
            # removed/fixed when the ORM gains a proper abstraction for virtual
            # or composite fields, and GFKs are reworked to fit into that.
            for relation in model._meta.many_to_many:
                if not relation.rel.through:
                    sub_objs = relation.bulk_related_objects(new_objs, self.using)
                    self.collect(sub_objs, source=model,
                                 source_attr=relation.rel.related_name,
                                 nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation
        ``related``.
        """
        return related.model._base_manager.using(self.using).filter(
            **{"%s__in" % related.field.name: objs})

    def instances_with_model(self):
        # Flattens self.data into (model, instance) pairs.
        for model, instances in self.data.iteritems():
            for obj in instances:
                yield model, obj

    def sort(self):
        # Topological sort of self.data by inter-model dependencies. If a
        # cycle is detected (no progress in a full pass), the data is left
        # in its original, unsorted order.
        sorted_models = []
        models = self.data.keys()
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model)
                if not (dependencies and dependencies.difference(sorted_models)):
                    sorted_models.append(model)
                    found = True
            if not found:
                return
        self.data = SortedDict([(model, self.data[model])
                                for model in sorted_models])

    @force_managed
    def delete(self):
        # sort instance collections
        for instances in self.data.itervalues():
            instances.sort(key=attrgetter("pk"))
        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer contraint checks until the
        # end of a transaction.
        self.sort()
        # send pre_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.pre_delete.send(sender=model, instance=obj,
                                        using=self.using)
        # update fields (e.g. SET_NULL) before any rows are removed
        for model, instances_for_fieldvalues in self.field_updates.iteritems():
            query = sql.UpdateQuery(model)
            for (field, value), instances in instances_for_fieldvalues.iteritems():
                query.update_batch([obj.pk for obj in instances],
                                   {field.name: value}, self.using)
        # reverse instance collections so children are deleted before parents
        for instances in self.data.itervalues():
            instances.reverse()
        # delete batches
        for model, batches in self.batches.iteritems():
            query = sql.DeleteQuery(model)
            for field, instances in batches.iteritems():
                query.delete_batch([obj.pk for obj in instances],
                                   self.using, field)
        # delete instances
        for model, instances in self.data.iteritems():
            query = sql.DeleteQuery(model)
            pk_list = [obj.pk for obj in instances]
            query.delete_batch(pk_list, self.using)
        # send post_delete signals
        for model, obj in self.instances_with_model():
            if not model._meta.auto_created:
                signals.post_delete.send(sender=model, instance=obj,
                                         using=self.using)
        # update collected instances in memory to mirror the DB state
        for model, instances_for_fieldvalues in self.field_updates.iteritems():
            for (field, value), instances in instances_for_fieldvalues.iteritems():
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in self.data.iteritems():
            for instance in instances:
                setattr(instance, model._meta.pk.attname, None)
def get_choices(self): return SortedDict((str(k), v) for k, v in settings.LOG_LEVELS)
def _create_form(form, title=None, description=None):
    """
    Creates a form class object.

    Usage::

        FormClass = _create_form(dataform="myForm")
        form = FormClass(data=request.POST)

    :param form: a data form slug or object
    :param title: optional title; pulled from DB by default
    :param description: optional description; pulled from DB by default
    :param readonly: optional readonly; converts form fields to be readonly.
        Usefull for display only logic.

    Returns a ``(DataFormClass, query_data)`` tuple, where ``query_data``
    exposes the querysets used so callers can re-use them.
    """
    # Make sure the form definition exists before continuing
    # Slightly evil, do type checking to see if form is a DataForm object or string
    # If form object is a slug then get the form object and reassign
    if isinstance(form, str) or isinstance(form, unicode):
        try:
            form = DataForm.objects.get(visible=True, slug=form)
        except DataForm.DoesNotExist:
            raise DataForm.DoesNotExist('DataForm %s does not exist. Make sure the slug name is correct and the form is visible.' % form)
    # Otherwise it should be a form model object, if not raise
    elif not isinstance(form, DataForm):
        raise AttributeError('Dataform %s is not a valid data form object.' % form)

    meta = {}
    slug = form if isinstance(form, str) or isinstance(form, unicode) else form.slug
    final_fields = SortedDict()
    choices_dict = defaultdict(tuple)  # field pk -> tuple of (value, title) pairs
    # Class attributes for the dynamically built form class; declared_fields
    # and base_fields share the same SortedDict on purpose.
    attrs = {
        'declared_fields' : final_fields,
        'base_fields' : final_fields,
        'meta' : meta,
        'slug' : slug,
    }

    # Parse the slug and create a class title
    form_class_title = create_form_class_title(slug)

    # Set the title and/or the description from the DB (but only if it wasn't given)
    meta['title'] = safe(form.title if not title else title)
    meta['description'] = safe(form.description if not description else description)
    meta['slug'] = form.slug

    # Get all the fields
    fields_qs = Field.objects.filter(
        dataformfield__data_form__slug=slug,
        visible=True
    ).order_by('dataformfield__order')

    fields = [field for field in fields_qs.values()]
    if not fields:
        raise Field.DoesNotExist('Field for %s do not exist. Make sure the slug name is correct and the fields are visible.' % slug)

    # Get all the choices associated to fields
    choices_qs = (
        FieldChoice.objects.select_related('choice', 'field').filter(
            field__dataformfield__data_form__slug=slug,
            field__visible=True
        ).order_by('order')
    )

    # Get the bindings for use in the Field Loop
    bindings = get_bindings(form=form)

    # Add a hidden field used for passing information to the JavaScript bindings function
    fields.append({
        'field_type': 'HiddenInput',
        'slug': 'js_dataform_bindings',
        'initial': safe(force_escape(json.dumps(bindings))),
        'required': False,
    })

    # Populate our choices dictionary
    for row in choices_qs:
        choices_dict[row.field.pk] += (row.choice.value, safe(row.choice.title)),

    # Process the field mappings and import any modules specified by string name.
    # NOTE(review): this mutates the module-level FIELD_MAPPINGS in place, so
    # string entries are resolved only on the first call.
    for key in FIELD_MAPPINGS:
        # Replace the string arguments with the actual modules or classes
        for sub_key in ('class', 'widget'):
            if not FIELD_MAPPINGS[key].has_key(sub_key):
                continue
            value = FIELD_MAPPINGS[key][sub_key]
            if isinstance(value, str) or isinstance(value, unicode):
                names = value.split(".")
                module_name = ".".join(names[:-1])
                class_name = names[-1]
                module = __import__(module_name, fromlist=[class_name])
                # Replace the string with a class pointer
                FIELD_MAPPINGS[key][sub_key] = getattr(module, class_name)
        # Handle widget arguments
        if not FIELD_MAPPINGS[key].has_key('widget_kwargs'):
            # Initialize all field-mappings that don't have a 'widget_kwargs' key
            FIELD_MAPPINGS[key]['widget_kwargs'] = {}

    # ----- Field Loop -----
    # Populate our fields dictionary for this form
    for row in fields:
        form_field_name = _field_for_form(name=row['slug'], form=slug)
        field_kwargs = {}
        field_map = FIELD_MAPPINGS[row['field_type']]
        widget_attrs = field_map.get('widget_attrs', {})

        if row.has_key('label'):
            field_kwargs['label'] = safe(row['label'])
        if row.has_key('help_text'):
            field_kwargs['help_text'] = safe(row['help_text'])
        if row.has_key('initial'):
            field_kwargs['initial'] = row['initial']
        if row.has_key('required'):
            field_kwargs['required'] = row['required']

        additional_field_kwargs = {}
        if row.has_key('arguments') and row['arguments'].strip():
            # Parse any additional field arguments as JSON and include them in field_kwargs
            temp_args = json.loads(str(row['arguments']))
            for arg in temp_args:
                additional_field_kwargs[str(arg)] = temp_args[arg]

        # Update the field arguments with the "additional arguments" JSON in the DB
        field_kwargs.update(additional_field_kwargs)

        # Get the choices for single and multiple choice fields
        if row['field_type'] in CHOICE_FIELDS:
            choices = ()
            # We add a separator for select boxes
            if row['field_type'] == 'Select':
                choices += ('', '--------'),
            # A callable in choices_module named after the slug takes
            # precedence over DB-defined choices.
            choices_func = getattr(choices_module, row['slug'].replace('-', '_'), None)
            # Populate our choices tuple
            if choices_func:
                choices += choices_func()
            else:
                choices += choices_dict[row['id']]
            field_kwargs['choices'] = choices
            if row['field_type'] in MULTI_CHOICE_FIELDS:
                # Get all of the specified default selected values (as a list, even if one element)
                field_kwargs['initial'] = (
                    field_kwargs['initial'].split(',')
                    if ',' in field_kwargs['initial']
                    else [field_kwargs['initial'], ]
                )
                # Remove whitespace so the user can use spaces
                field_kwargs['initial'] = [element.strip() for element in field_kwargs['initial']]
            else:
                field_kwargs['initial'] = ''.join(field_kwargs['initial'])

        # Add our additional css classes
        if row.has_key('classes'):
            existing_widget_attrs = widget_attrs.get('class', '')
            widget_attrs['class'] = existing_widget_attrs + ' '.join(row['classes'].split(',')).strip()
            # Add bindings css class
            #FIXME: Should we be adding this on the widget or field?
            if row['field_type'] != 'HiddenInput':
                if not 'dataform-field' in widget_attrs['class']:
                    widget_attrs['class'] += " dataform-field"

        # Instantiate the widget that this field will use
        # TODO: Possibly create logic that passes submissionid to file upload widget to handle file
        # paths without enforcing a redirect.
        if field_map.has_key('widget'):
            field_kwargs['widget'] = field_map['widget'](attrs=widget_attrs, **field_map['widget_kwargs'])

        # Add this field, including any widgets and additional arguments
        # (initial, label, required, help_text, etc)
        final_field = field_map['class'](**field_kwargs)
        final_field.is_checkbox = (row['field_type'] == 'CheckboxInput')
        final_field.dataform_key = row['field_type']
        final_fields[form_field_name] = final_field

    # Grab the dynamic validation function from validation.py
    if validation_module:
        validate = getattr(validation_module, form_class_title, None)
        if validate:
            # Pull the "clean_" functions from the validation
            # for this form and inject them into the form object
            for attr_name in dir(validate):
                if attr_name.startswith('clean'):
                    attrs[attr_name] = getattr(validate, attr_name)

    # Return a class object of this form with all attributes
    DataFormClass = type(form_class_title, (BaseDataForm,), attrs)

    # Also return the querysets so that they can be re-used
    query_data = {
        'dataform_query' : form,
        'choice_query' : choices_qs,
        'field_query' : fields_qs,
        'fields_list' : fields,
    }

    return DataFormClass, query_data
class DataTable(object): """ A class which defines a table with all data and associated actions. .. attribute:: name String. Read-only access to the name specified in the table's Meta options. .. attribute:: multi_select Boolean. Read-only access to whether or not this table should display a column for multi-select checkboxes. .. attribute:: data Read-only access to the data this table represents. .. attribute:: filtered_data Read-only access to the data this table represents, filtered by the :meth:`~horizon.tables.FilterAction.filter` method of the table's :class:`~horizon.tables.FilterAction` class (if one is provided) using the current request's query parameters. """ __metaclass__ = DataTableMetaclass def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs): self._meta.request = request self._meta.data = data self.kwargs = kwargs self._needs_form_wrapper = needs_form_wrapper # Create a new set columns = [] for key, _column in self._columns.items(): column = copy.copy(_column) column.table = self columns.append((key, column)) self.columns = SortedDict(columns) self._populate_data_cache() # Associate these actions with this table for action in self.base_actions.values(): action.table = self self.needs_summary_row = any([col.summation for col in self.columns.values()]) def __unicode__(self): return unicode(self._meta.verbose_name) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.name) @property def name(self): return self._meta.name @property def data(self): return self._meta.data @data.setter def data(self, data): self._meta.data = data @property def multi_select(self): return self._meta.multi_select @property def filtered_data(self): if not hasattr(self, '_filtered_data'): self._filtered_data = self.data if self._meta.filter and self._meta._filter_action: action = self._meta._filter_action filter_string = self.get_filter_string() request_method = self._meta.request.method if filter_string and request_method == action.method: 
self._filtered_data = action.filter(self, self.data, filter_string) return self._filtered_data def get_filter_string(self): filter_action = self._meta._filter_action param_name = filter_action.get_param_name() filter_string = self._meta.request.POST.get(param_name, '') return filter_string def _populate_data_cache(self): self._data_cache = {} # Set up hash tables to store data points for each column for column in self.get_columns(): self._data_cache[column] = {} def _filter_action(self, action, request, datum=None): try: # Catch user errors in permission functions here return action._allowed(request, datum) except Exception: LOG.exception("Error while checking action permissions.") return None def render(self): """ Renders the table using the template from the table options. """ table_template = template.loader.get_template(self._meta.template) extra_context = {self._meta.context_var_name: self} context = template.RequestContext(self._meta.request, extra_context) return table_template.render(context) def get_absolute_url(self): """ Returns the canonical URL for this table. This is used for the POST action attribute on the form element wrapping the table. In many cases it is also useful for redirecting after a successful action on the table. For convenience it defaults to the value of ``request.get_full_path()`` with any query string stripped off, e.g. the path at which the table was requested. """ return self._meta.request.get_full_path().partition('?')[0] def get_empty_message(self): """ Returns the message to be displayed when there is no data. """ return _("No items to display.") def get_object_by_id(self, lookup): """ Returns the data object from the table's dataset which matches the ``lookup`` parameter specified. An error will be raised if the match is not a single data object. Uses :meth:`~horizon.tables.DataTable.get_object_id` internally. 
""" matches = [datum for datum in self.data if self.get_object_id(datum) == lookup] if len(matches) > 1: raise ValueError("Multiple matches were returned for that id: %s." % matches) if not matches: raise exceptions.Http302(self.get_absolute_url(), _('No match returned for the id "%s".') % lookup) return matches[0] @property def has_actions(self): """ Boolean. Indicates whether there are any available actions on this table. """ if not self.base_actions: return False return any(self.get_table_actions()) or any(self._meta.row_actions) @property def needs_form_wrapper(self): """ Boolean. Indicates whather this table should be rendered wrapped in a ``<form>`` tag or not. """ # If needs_form_wrapper is explicitly set, defer to that. if self._needs_form_wrapper is not None: return self._needs_form_wrapper # Otherwise calculate whether or not we need a form element. return self.has_actions def get_table_actions(self): """ Returns a list of the action instances for this table. """ bound_actions = [self.base_actions[action.name] for action in self._meta.table_actions] return [action for action in bound_actions if self._filter_action(action, self._meta.request)] def get_row_actions(self, datum): """ Returns a list of the action instances for a specific row. """ bound_actions = [] for action in self._meta.row_actions: # Copy to allow modifying properties per row bound_action = copy.copy(self.base_actions[action.name]) bound_action.attrs = copy.copy(bound_action.attrs) bound_action.datum = datum # Remove disallowed actions. if not self._filter_action(bound_action, self._meta.request, datum): continue # Hook for modifying actions based on data. No-op by default. 
bound_action.update(self._meta.request, datum) # Pre-create the URL for this link with appropriate parameters if issubclass(bound_action.__class__, LinkAction): bound_action.bound_url = bound_action.get_link_url(datum) bound_actions.append(bound_action) return bound_actions def render_table_actions(self): """ Renders the actions specified in ``Meta.table_actions``. """ template_path = self._meta.table_actions_template table_actions_template = template.loader.get_template(template_path) bound_actions = self.get_table_actions() extra_context = {"table_actions": bound_actions} if self._meta.filter: extra_context["filter"] = self._meta._filter_action context = template.RequestContext(self._meta.request, extra_context) return table_actions_template.render(context) def render_row_actions(self, datum): """ Renders the actions specified in ``Meta.row_actions`` using the current row data. """ template_path = self._meta.row_actions_template row_actions_template = template.loader.get_template(template_path) bound_actions = self.get_row_actions(datum) extra_context = {"row_actions": bound_actions, "row_id": self.get_object_id(datum)} context = template.RequestContext(self._meta.request, extra_context) return row_actions_template.render(context) @staticmethod def parse_action(action_string): """ Parses the ``action`` parameter (a string) sent back with the POST data. By default this parses a string formatted as ``{{ table_name }}__{{ action_name }}__{{ row_id }}`` and returns each of the pieces. The ``row_id`` is optional. """ if action_string: bits = action_string.split(STRING_SEPARATOR) bits.reverse() table = bits.pop() action = bits.pop() try: object_id = bits.pop() except IndexError: object_id = None return table, action, object_id def take_action(self, action_name, obj_id=None, obj_ids=None): """ Locates the appropriate action and routes the object data to it. The action should return an HTTP redirect if successful, or a value which evaluates to ``False`` if unsuccessful. 
""" # See if we have a list of ids obj_ids = obj_ids or self._meta.request.POST.getlist('object_ids') action = self.base_actions.get(action_name, None) if not action or action.method != self._meta.request.method: # We either didn't get an action or we're being hacked. Goodbye. return None # Meanhile, back in Gotham... if not action.requires_input or obj_id or obj_ids: if obj_id: obj_id = self.sanitize_id(obj_id) if obj_ids: obj_ids = [self.sanitize_id(i) for i in obj_ids] # Single handling is easy if not action.handles_multiple: response = action.single(self, self._meta.request, obj_id) # Otherwise figure out what to pass along else: # Preference given to a specific id, since that implies # the user selected an action for just one row. if obj_id: obj_ids = [obj_id] response = action.multiple(self, self._meta.request, obj_ids) return response elif action and action.requires_input and not (obj_id or obj_ids): messages.info(self._meta.request, _("Please select a row before taking that action.")) return None @classmethod def check_handler(cls, request): """ Determine whether the request should be handled by this table. """ if request.method == "POST" and "action" in request.POST: table, action, obj_id = cls.parse_action(request.POST["action"]) elif "table" in request.GET and "action" in request.GET: table = request.GET["table"] action = request.GET["action"] obj_id = request.GET.get("obj_id", None) else: table = action = obj_id = None return table, action, obj_id def maybe_preempt(self): """ Determine whether the request should be handled by a preemptive action on this table or by an AJAX row update before loading any data. """ request = self._meta.request table_name, action_name, obj_id = self.check_handler(request) if table_name == self.name: # Handle AJAX row updating. 
new_row = self._meta.row_class(self) if new_row.ajax and new_row.ajax_action_name == action_name: try: datum = new_row.get_data(request, obj_id) new_row.load_cells(datum) error = False except: datum = None error = exceptions.handle(request, ignore=True) if request.is_ajax(): if not error: return HttpResponse(new_row.render()) else: return HttpResponse(status=error.status_code) preemptive_actions = [action for action in self.base_actions.values() if action.preempt] if action_name: for action in preemptive_actions: if action.name == action_name: handled = self.take_action(action_name, obj_id) if handled: return handled return None def maybe_handle(self): """ Determine whether the request should be handled by any action on this table after data has been loaded. """ request = self._meta.request table_name, action_name, obj_id = self.check_handler(request) if table_name == self.name and action_name: return self.take_action(action_name, obj_id) return None def sanitize_id(self, obj_id): """ Override to modify an incoming obj_id to match existing API data types or modify the format. """ return obj_id def get_object_id(self, datum): """ Returns the identifier for the object this row will represent. By default this returns an ``id`` attribute on the given object, but this can be overridden to return other values. .. warning:: Make sure that the value returned is a unique value for the id otherwise rendering issues can occur. """ return datum.id def get_object_display(self, datum): """ Returns a display name that identifies this object. By default, this returns a ``name`` attribute from the given object, but this can be overriden to return other values. """ return datum.name def has_more_data(self): """ Returns a boolean value indicating whether there is more data available to this table from the source (generally an API). The method is largely meant for internal use, but if you want to override it to provide custom behavior you can do so at your own risk. 
""" return self._meta.has_more_data def get_marker(self): """ Returns the identifier for the last object in the current data set for APIs that use marker/limit-based paging. """ return http.urlquote_plus(self.get_object_id(self.data[-1])) def get_pagination_string(self): """ Returns the query parameter string to paginate this table. """ return "=".join([self._meta.pagination_param, self.get_marker()]) def calculate_row_status(self, statuses): """ Returns a boolean value determining the overall row status based on the dictionary of column name to status mappings passed in. By default, it uses the following logic: #. If any statuses are ``False``, return ``False``. #. If no statuses are ``False`` but any or ``None``, return ``None``. #. If all statuses are ``True``, return ``True``. This provides the greatest protection against false positives without weighting any particular columns. The ``statuses`` parameter is passed in as a dictionary mapping column names to their statuses in order to allow this function to be overridden in such a way as to weight one column's status over another should that behavior be desired. """ values = statuses.values() if any([status is False for status in values]): return False elif any([status is None for status in values]): return None else: return True def get_row_status_class(self, status): """ Returns a css class name determined by the status value. This class name is used to indicate the status of the rows in the table if any ``status_columns`` have been specified. """ if status is True: return "status_up" elif status is False: return "status_down" else: return "status_unknown" def get_columns(self): """ Returns this table's columns including auto-generated ones.""" return self.columns.values() def get_rows(self): """ Return the row data for this table broken out by columns. 
""" rows = [] try: for datum in self.filtered_data: rows.append(self._meta.row_class(self, datum)) except: # Exceptions can be swallowed at the template level here, # re-raising as a TemplateSyntaxError makes them visible. LOG.exception("Error while rendering table rows.") exc_info = sys.exc_info() raise template.TemplateSyntaxError, exc_info[1], exc_info[2] return rows
results = [] for path in os.listdir(os.path.join(MODULE_ROOT, 'locale')): if path.startswith('.'): continue results.append(path) return results MODULE_ROOT = os.path.dirname(__import__('sentry').__file__) DATA_ROOT = os.path.join(MODULE_ROOT, 'data') SORT_OPTIONS = SortedDict(( ('priority', _('Priority')), ('date', _('Last Seen')), ('new', _('First Seen')), ('freq', _('Frequency')), ('tottime', _('Total Time Spent')), ('avgtime', _('Average Time Spent')), ('accel_15', _('Trending: %(minutes)d minutes' % {'minutes': 15})), ('accel_60', _('Trending: %(minutes)d minutes' % {'minutes': 60})), )) SORT_CLAUSES = { 'priority': 'sentry_groupedmessage.score', 'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)', 'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)', 'freq': 'sentry_groupedmessage.times_seen', 'tottime':
class Row(html.HTMLElement):
    """ Represents a row in the table.

    When iterated, the ``Row`` instance will yield each of its cells.

    Rows are capable of AJAX updating, with a little added work:

    The ``ajax`` property needs to be set to ``True``, and
    subclasses need to define a ``get_data`` method which returns a data
    object appropriate for consumption by the table (effectively the "get"
    lookup versus the table's "list" lookup).

    The automatic update interval is configurable by setting the key
    ``ajax_poll_interval`` in the ``settings.HORIZON_CONFIG`` dictionary.
    Default: ``2500`` (measured in milliseconds).

    .. attribute:: table

        The table which this row belongs to.

    .. attribute:: datum

        The data object which this row represents.

    .. attribute:: id

        A string uniquely representing this row composed of the table name
        and the row data object's identifier.

    .. attribute:: cells

        The cells belonging to this row stored in a ``SortedDict`` object.
        This attribute is populated during instantiation.

    .. attribute:: status

        Boolean value representing the status of this row calculated from
        the values of the table's ``status_columns`` if they are set.

    .. attribute:: status_class

        Returns a css class for the status of the row based on ``status``.

    .. attribute:: ajax

        Boolean value to determine whether ajax updating for this row is
        enabled.

    .. attribute:: ajax_action_name

        String that is used for the query parameter key to request AJAX
        updates. Generally you won't need to change this value.
        Default: ``"row_update"``.
    """
    ajax = False
    ajax_action_name = "row_update"

    def __init__(self, table, datum=None):
        super(Row, self).__init__()
        self.table = table
        self.datum = datum
        if self.datum:
            self.load_cells()
        else:
            self.id = None
            # BUGFIX: was ``self.cells = []``; every consumer
            # (``__iter__``, ``get_cells``) calls ``self.cells.values()``,
            # which a plain list lacks. Use an empty SortedDict so a
            # data-less row is iterable (and still falsy) before
            # load_cells() runs.
            self.cells = SortedDict()

    def load_cells(self, datum=None):
        """
        Load the row's data (either provided at initialization or as an
        argument to this function), initiailize all the cells contained
        by this row, and set the appropriate row properties which require
        the row's data to be determined.

        This function is called automatically by
        :meth:`~horizon.tables.Row.__init__` if the ``datum`` argument is
        provided. However, by not providing the data during initialization
        this function allows for the possibility of a two-step loading
        pattern when you need a row instance but don't yet have the data
        available.
        """
        # Compile all the cells on instantiation.
        table = self.table
        if datum:
            self.datum = datum
        else:
            datum = self.datum
        cells = []
        for column in table.columns.values():
            if column.auto == "multi_select":
                widget = forms.CheckboxInput(check_test=False)
                # Convert value to string to avoid accidental type conversion
                data = widget.render('object_ids',
                                     unicode(table.get_object_id(datum)))
                table._data_cache[column][table.get_object_id(datum)] = data
            elif column.auto == "actions":
                data = table.render_row_actions(datum)
                table._data_cache[column][table.get_object_id(datum)] = data
            else:
                data = column.get_data(datum)
            cell = Cell(datum, data, column, self)
            # Keyed by the column's name (or its auto-type for
            # auto-generated columns which have no name).
            cells.append((column.name or column.auto, cell))
        self.cells = SortedDict(cells)

        if self.ajax:
            interval = settings.HORIZON_CONFIG.get('ajax_poll_interval', 2500)
            self.attrs['data-update-interval'] = interval
            self.attrs['data-update-url'] = self.get_ajax_update_url()
            self.classes.append("ajax-update")

        # Add the row's status class and id to the attributes to be rendered.
        self.classes.append(self.status_class)
        id_vals = {"table": self.table.name,
                   "sep": STRING_SEPARATOR,
                   "id": table.get_object_id(datum)}
        self.id = "%(table)s%(sep)srow%(sep)s%(id)s" % id_vals
        self.attrs['id'] = self.id

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.id)

    def __iter__(self):
        return iter(self.cells.values())

    @property
    def status(self):
        # Overall status derived from the table's configured status
        # columns; ``None`` (implicit) when no status columns are set.
        column_names = self.table._meta.status_columns
        if column_names:
            statuses = dict([(column_name, self.cells[column_name].status)
                             for column_name in column_names])
            return self.table.calculate_row_status(statuses)

    @property
    def status_class(self):
        column_names = self.table._meta.status_columns
        if column_names:
            return self.table.get_row_status_class(self.status)
        else:
            return ''

    def render(self):
        return render_to_string("horizon/common/_data_table_row.html",
                                {"row": self})

    def get_cells(self):
        """ Returns the bound cells for this row in order. """
        return self.cells.values()

    def get_ajax_update_url(self):
        table_url = self.table.get_absolute_url()
        params = urlencode({"table": self.table.name,
                            "action": self.ajax_action_name,
                            "obj_id": self.table.get_object_id(self.datum)})
        return "%s?%s" % (table_url, params)

    def get_data(self, request, obj_id):
        """
        Fetches the updated data for the row based on the object id
        passed in. Must be implemented by a subclass to allow AJAX updating.
        """
        raise NotImplementedError("You must define a get_data method on %s"
                                  % self.__class__.__name__)
""" :param request: request object :return view<'volume_manage/create.html'>: the view create volume table """ form = CreateVolumeForm() try: tenants = api.tenant_list(request, getattr(request.user, 'is_superuser', True)) except Unauthorized: raise except Exception, ex: LOG.error('tenants not found,the error is %s' % ex) tenants = [] tenants_dic = SortedDict([(tenant.id, getattr(tenant, "name")) for tenant in tenants if tenant.enabled]) current_tenant = getattr(request.user, 'tenant_id', None) try: if tenant_id != 'tenant': if switch_tenants(request, tenant_id): usages = quotas.tenant_quota_usages(request) else: usages = quotas.tenant_quota_usages(request) except Unauthorized: raise except Exception, ex: LOG.error("usages not found ,the error is %s" % ex) usages = None