def __init__(self, type_cast=False):
    GeneratorBase.__init__(self)
    self.lines = []
    #self.imported = set()
    self.variables = {}
    self.all_variables = set()
    self.namespace = set()
    self.exceptions = {}
    self.items = {
        CGType.METHOD: self.add_method,
        CGType.PROP: self.add_prop,
        CGType.ATTR: self.add_attr,
        CGType.FIELD: self.add_field,
        CGType.ELEMENT: self.add_element,
        CGType.STRUCT: self.create_struct,
        CGType.SEQ: self.create_seq,
        CGType.PSEUD_PROP: self.add_method,
        CGType.SERVICE: self.create_service,
        CGType.CONTEXT: self.get_component_context,
        CGType.VARIABLE: self.declare_variable
    }
    #self.propertyset_imported = False
    self.get_property_exceptions_added = False
    self.set_property_exceptions_added = False
    self.mcf_declared = False
    self.add_namespace('System', unoidl=False)
    self.add_namespace('com.sun.star.uno')
def clean_filename(url, ans):
    # Split outdir and our file/dir under outdir
    # (Note: ans may not be a valid filename)
    global config
    (par, ans) = (ans[:len(config.outdir)], ans[len(config.outdir):])
    if ans.startswith(os.sep):
        ans = ans[1:]
    # Replace % escape codes with underscores, dashes with underscores.
    while "%%" in ans:
        ans = ans[:ans.index("%%")] + "_" + ans[ans.index("%%") + 2:]
    while "%25" in ans:
        ans = ans[:ans.index("%25")] + "_" + ans[ans.index("%25") + 5:]
    while "%" in ans:
        ans = ans[:ans.index("%")] + "_" + ans[ans.index("%") + 3:]
    ans = ans.replace("-", "_")
    while "__" in ans:
        ans = ans.replace("__", "_")
    while "_." in ans:
        ans = ans.replace("_.", ".")
    # Rename math thumbnails
    if "/math/" in url:
        tail = os.path.split(ans)[1]
        if os.path.splitext(tail)[1] == ".png":
            tail = os.path.splitext(tail)[0]
            if set(tail) <= set("0123456789abcdef") and len(tail) == 32:
                ans = "math_" + hashlib.md5(tail).hexdigest()[:4] + ".png"
    return os.path.join(par, ans)
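
# A minimal usage sketch for clean_filename (hypothetical, not from the original
# module): the real `config` object is defined elsewhere; a stand-in with an
# `outdir` attribute is enough here.  `os` and `hashlib` are assumed to be
# imported by the module itself.
if __name__ == '__main__':
    class _FakeConfig(object):
        outdir = '/tmp/out'
    config = _FakeConfig()
    # Percent escapes and dashes become underscores under the output dir.
    print clean_filename('http://example.org/wiki/Some%20Page-1',
                         '/tmp/out/Some%20Page-1')   # -> /tmp/out/Some_Page_1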
def __init__(self):
    self.num_states = 0
    self.names = []
    self.transitions = {}
    self.start_states = set()
    self.final_states = set()
    self.unmergeable_states = set()
def get_py_method_def(self, name):
    """
    Returns an array element to use in a PyMethodDef table.
    Should only be called after code generation.

    name -- python wrapper/method name
    """
    if len(self.all_wrappers) == 1 \
            and not getattr(self.all_wrappers[0], 'NEEDS_OVERLOADING_INTERFACE', False):
        return self.all_wrappers[0].get_py_method_def(name)
    else:
        self._normalize_py_method_flags()
        flags = self.all_wrappers[0].get_py_method_def_flags()
        ## detect inconsistencies in flags; they must all be the same
        if __debug__:
            for func in self.all_wrappers:
                try:
                    assert set(func.get_py_method_def_flags()) == set(flags), \
                        ("Expected PyMethodDef flags %r, got %r" %
                         (flags, func.get_py_method_def_flags()))
                except (TypeConfigurationError, CodeGenerationError, NotSupportedError):
                    pass
        docstring = None # FIXME
        assert isinstance(self.wrapper_return, basestring)
        assert isinstance(self.wrapper_actual_name, basestring)
        assert isinstance(self.wrapper_args, list)
        return "{(char *) \"%s\", (PyCFunction) %s, %s, %s }," % \
            (name, self.wrapper_actual_name, '|'.join(flags),
             (docstring is None and "NULL" or ('"' + docstring + '"')))
def do_include(mlist, msg, msgdata, recips):
    # regular_include_lists are the other mailing lists on this mailman
    # installation whose members are included in the regular (non-digest)
    # delivery if those list addresses don't appear in To: or Cc: headers.
    if not mlist.regular_include_lists:
        return recips
    recips = set(recips)
    destinations = email.Utils.getaddresses(msg.get_all('to', []) +
                                            msg.get_all('cc', []))
    destinations = [y.lower() for x, y in destinations]
    for listname in mlist.regular_include_lists:
        listname = listname.lower()
        if listname in destinations:
            continue
        listlhs, hostname = listname.split('@')
        if listlhs == mlist.internal_name():
            syslog('error', 'Include list %s is a self reference.', listname)
            continue
        try:
            slist = MailList(listlhs, lock=False)
        except MMUnknownListError:
            syslog('error', 'Include list %s not found.', listname)
            continue
        if not mm_cfg.ALLOW_CROSS_DOMAIN_SIBLING \
                and slist.host_name != hostname:
            syslog('error', 'Include list %s is not in the same domain.', listname)
            continue
        srecips = set([slist.getMemberCPAddress(m)
                       for m in slist.getRegularMemberKeys()
                       if slist.getDeliveryStatus(m) == ENABLED])
        recips |= srecips
    return list(recips)
def check_api_dict(d):
    """Check that an api dict is valid (does not use the same index twice)."""
    # We check if a same index is used twice: we 'revert' the dict so that
    # indexes become keys. If the length is different, it means one index has
    # been used at least twice.
    revert_dict = dict([(v, k) for k, v in d.items()])
    if not len(revert_dict) == len(d):
        # We compute a dict index -> list of associated items
        doubled = {}
        for name, index in d.items():
            try:
                doubled[index].append(name)
            except KeyError:
                doubled[index] = [name]
        msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items()
       if len(names) != 1]
        raise ValueError(msg)

    # No 'hole' in the indexes may be allowed, and it must start at 0
    indexes = set(d.values())
    expected = set(range(len(indexes)))
    if not indexes == expected:
        diff = expected.symmetric_difference(indexes)
        msg = "There are some holes in the API indexing: " \
              "(symmetric diff is %s)" % diff
        raise ValueError(msg)
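
# Hedged usage sketch for check_api_dict (hypothetical data, not from the
# original test suite): a contiguous 0-based index mapping passes, while a
# duplicated index raises ValueError.
if __name__ == '__main__':
    check_api_dict({'sin': 0, 'cos': 1, 'tan': 2})      # passes silently
    try:
        check_api_dict({'sin': 0, 'cos': 1, 'tan': 1})  # index 1 used twice
    except ValueError, e:
        print e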
def __init__(self, site=None, backend=None):
    self.query_filter = SearchNode()
    self.order_by = []
    self.models = set()
    self.boost = {}
    self.start_offset = 0
    self.end_offset = None
    self.highlight = False
    self.facets = set()
    self.date_facets = {}
    self.query_facets = []
    self.narrow_queries = set()
    self._raw_query = None
    self._raw_query_params = {}
    self._more_like_this = False
    self._mlt_instance = None
    self._results = None
    self._hit_count = None
    self._facet_counts = None
    self._spelling_suggestion = None

    if backend is not None:
        self.backend = backend
    else:
        self.backend = SearchBackend(site=site)
def run(self):
    self.init(database = True, write = False)

    if not ctx.get_option('without_buildno'):
        installed = self.installdb.list_installed()
    else:
        installed = self.installdb.list_installed_without_buildno()

    component = ctx.get_option('component')
    if component:
        #FIXME: pisi api is insufficient to do this
        from sets import Set as set
        component_pkgs = self.componentdb.get_union_packages(component, walk=True)
        installed = list(set(installed) & set(component_pkgs))

    installed.sort()

    if self.options.install_info:
        ctx.ui.info(_('Package Name |St| Version| Rel.| Build| Distro| Date'))
        print '========================================================================'
    for pkg in installed:
        package = self.installdb.get_package(pkg)
        inst_info = self.installdb.get_info(pkg)
        if self.options.long:
            ctx.ui.info(unicode(package))
            ctx.ui.info(unicode(inst_info))
        elif self.options.install_info:
            ctx.ui.info('%-15s |%s' % (package.name, inst_info.one_liner()))
        else:
            ctx.ui.info('%15s - %s' % (package.name, unicode(package.summary)))
def init_cxx(self):
    if not 'cc' in self.features:
        self.mappings['.c'] = TaskGen.task_gen.mappings['.cxx']
    self.p_flag_vars = set(self.p_flag_vars).union(g_cxx_flag_vars)
    self.p_type_vars = set(self.p_type_vars).union(g_cxx_type_vars)
    if not self.env['CXX_NAME']:
        raise Utils.WafError("At least one compiler (g++, ..) must be selected")
def run(self):
    self.init(database = True, write = False)

    component = ctx.get_option('component')
    if component:
        #FIXME: pisi api is insufficient to do this
        from sets import Set as set
        installed = pisi.api.list_installed()
        component_pkgs = self.componentdb.get_union_packages(component, walk=True)
        pkgs = list(set(installed) & set(component_pkgs))
    elif self.args:
        pkgs = self.args
    else:
        ctx.ui.info(_('Checking all installed packages'))
        pkgs = pisi.api.list_installed()

    for pkg in pkgs:
        ctx.ui.info(_('* Checking %s... ') % pkg, noln=True)
        if self.installdb.has_package(pkg):
            corrupt = pisi.api.check(pkg)
            if corrupt:
                ctx.ui.info(_('\nPackage %s is corrupt.') % pkg)
            else:
                ctx.ui.info(_("OK"), verbose=False)
        else:
            ctx.ui.info(_('Package %s not installed') % pkg)
def _reference_contents(self, contents):
    "Helper method which builds internal structure from parsed contents"
    self._contents = contents
    self._ttl = int(self._contents.get("ttl", 0))
    self._return_as_map = bool(self._contents.get("return_as_map", False))
    self._legacy_dbname = self._contents.get("legacy_dbname", None)

    # reset these before doing the sql conversion because we will
    # read them there. reset these while loading so we pick up
    # changes.
    self._around = set()
    self._append = set()
    self._integer = set()
    self._options = self._contents.get("dynamic_where", {})
    for key in self._options:
        if isinstance(self._options[key], basestring):
            self._options[key] = self._convert_sql(self._options[key])
        elif isinstance(self._options[key], list):
            lines = []
            for line in self._options[key]:
                lines.append(self._convert_sql(line))
            self._options[key] = lines
        else:
            moreopt = {}
            for kk in self._options[key]:
                moreopt[kk] = self._convert_sql(self._options[key][kk])
            self._options[key] = moreopt

    self._base_query = self._convert_sql(self._contents["base_query"])
    self._query_suffix = self._convert_sql(self._contents.get("query_suffix", ""))
def test_regression_site_kwarg(self):
    mock_index_site = SearchSite()
    mock_index_site.register(MockModel)
    mock_index_site.register(AnotherMockModel)

    bsqs = SearchQuerySet(site=mock_index_site)
    self.assertEqual(set(bsqs.query.backend.site.get_indexed_models()),
                     set([MockModel, AnotherMockModel]))
def get_trait_names():
    global TRAIT_NAMES
    from enthought.traits.api import HasTraits
    if TRAIT_NAMES is None:
        # Compute the trait-specific names once and cache them.
        TRAIT_NAMES = set(dir2(HasTraits())) - set(dir2(object()))
    return TRAIT_NAMES
def _merge_sanity_check(self, other):
    super(ValuesQuerySet, self)._merge_sanity_check(other)
    if (set(self.extra_names) != set(other.extra_names) or
            set(self.field_names) != set(other.field_names) or
            self.aggregate_names != other.aggregate_names):
        raise TypeError("Merging '%s' classes must involve the same values in each case."
                        % self.__class__.__name__)
def model_dependencies(model, last_models=None, checked_models=None):
    """
    Returns a set of models this one depends on to be defined; things like
    OneToOneFields as ID, ForeignKeys everywhere, etc.
    """
    depends = {}
    checked_models = checked_models or set()
    # Get deps for each field
    for field in model._meta.fields + model._meta.many_to_many:
        depends.update(field_dependencies(field, last_models))
    # Now recurse
    new_to_check = set(depends.keys()) - checked_models
    while new_to_check:
        checked_model = new_to_check.pop()
        if checked_model == model or checked_model in checked_models:
            continue
        checked_models.add(checked_model)
        deps = model_dependencies(checked_model, last_models, checked_models)
        # Loop through dependencies...
        for dep, value in deps.items():
            # If the new dep is not already checked, add to the queue
            if (dep not in depends) and (dep not in new_to_check) and (dep not in checked_models):
                new_to_check.add(dep)
            depends[dep] = value
    return depends
def test_schema(self):
    "Test schema"
    sp_hbb1 = testutil.datafile('sp_hbb1')
    sp2 = seqdb.BlastDB(sp_hbb1)
    sp2.__doc__ = 'another sp'
    worldbase.Bio.Seq.sp2 = sp2
    sp = worldbase.Bio.Seq.Swissprot.sp42()
    m = mapping.Mapping(sourceDB=sp, targetDB=sp2)
    m.__doc__ = 'sp -> sp2'
    worldbase.Bio.Seq.testmap = m
    worldbaseSchema.Bio.Seq.testmap = metabase.OneToManyRelation(sp, sp2)
    worldbase.commit()
    worldbase.clear_cache()

    sp3 = seqdb.BlastDB(sp_hbb1)
    sp3.__doc__ = 'sp number 3'
    worldbase.Bio.Seq.sp3 = sp3
    sp2 = worldbase.Bio.Seq.sp2()
    m = mapping.Mapping(sourceDB=sp3, targetDB=sp2)
    m.__doc__ = 'sp3 -> sp2'
    worldbase.Bio.Seq.testmap2 = m
    worldbaseSchema.Bio.Seq.testmap2 = metabase.OneToManyRelation(sp3, sp2)
    l = worldbase._mdb.resourceCache.keys()
    l.sort()
    assert l == ['Bio.Seq.sp2', 'Bio.Seq.sp3', 'Bio.Seq.testmap2']
    worldbase.commit()
    g = worldbase._mdb.writer.storage.graph
    expected = set(['Bio.Annotation.annoDB', 'Bio.Seq.Swissprot.sp42',
                    'Bio.Seq.sp2', 'Bio.Seq.sp3'])
    found = set(g.keys())
    self.EQ(len(expected - found), 0)
def _parse_filename(self, filename):
    """Returns a messages file name components

    Receives the file name (without path) of a msg.  Usual format is
    '<%d_%d.%d.%s>,U=<%d>,FMD5=<%s>:2,<FLAGS>' (pointy brackets
    denoting the various components).

    If FMD5 does not correspond with the current folder MD5, we will
    return None for the UID & FMD5 (as it is not valid in this
    folder).  If UID or FMD5 can not be detected, we return `None`
    for the respective element.  If flags are empty or cannot be
    detected, we return an empty flags list.

    :returns: (prefix, UID, FMD5, flags). UID is a numeric "long" type.
        flags is a set() of Maildir flags"""
    prefix, uid, fmd5, flags = None, None, None, set()
    prefixmatch = self.re_prefixmatch.match(filename)
    if prefixmatch:
        prefix = prefixmatch.group(1)
    folderstr = ',FMD5=%s' % self._foldermd5
    foldermatch = folderstr in filename
    # If there was no folder MD5 specified, or if it mismatches,
    # assume it is a foreign (new) message and ret: uid, fmd5 = None, None
    if foldermatch:
        uidmatch = re_uidmatch.search(filename)
        if uidmatch:
            uid = long(uidmatch.group(1))
    flagmatch = self.re_flagmatch.search(filename)
    if flagmatch:
        # Filter out all lowercase (custom maildir) flags. We don't
        # handle them yet.
        flags = set((c for c in flagmatch.group(1) if not c.islower()))
    return prefix, uid, fmd5, flags
def test_schema(self):
    "Test schema"
    sp_hbb1 = testutil.datafile('sp_hbb1')
    sp2 = seqdb.BlastDB(sp_hbb1)
    sp2.__doc__ = 'another sp'
    pygr.Data.Bio.Seq.sp2 = sp2
    sp = pygr.Data.Bio.Seq.Swissprot.sp42()
    m = mapping.Mapping(sourceDB=sp, targetDB=sp2)
    m.__doc__ = 'sp -> sp2'
    pygr.Data.Bio.Seq.testmap = m
    pygr.Data.schema.Bio.Seq.testmap = pygr.Data.OneToManyRelation(sp, sp2)
    pygr.Data.save()
    pygr.Data.clear_cache()

    sp3 = seqdb.BlastDB(sp_hbb1)
    sp3.__doc__ = 'sp number 3'
    pygr.Data.Bio.Seq.sp3 = sp3
    sp2 = pygr.Data.Bio.Seq.sp2()
    m = mapping.Mapping(sourceDB=sp3, targetDB=sp2)
    m.__doc__ = 'sp3 -> sp2'
    pygr.Data.Bio.Seq.testmap2 = m
    pygr.Data.schema.Bio.Seq.testmap2 = pygr.Data.OneToManyRelation(sp3, sp2)

    # List all cached resources.
    l = pygr.Data.getResource.resourceCache.keys()
    l.sort()
    assert l == ['Bio.Seq.sp2', 'Bio.Seq.sp3', 'Bio.Seq.testmap2']
    pygr.Data.save()
    g = pygr.Data.getResource.writer.storage.graph
    expected = set(['Bio.Annotation.annoDB', 'Bio.Seq.Swissprot.sp42',
                    'Bio.Seq.sp2', 'Bio.Seq.sp3'])
    found = set(g.keys())
    self.EQ(len(expected - found), 0)
def buildSet(args=None):
    """Turns a list or a string into a set."""
    if isinstance(args, str):
        return set([args])
    if args is None:
        return set()
    return set(args)
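
# Hedged usage sketch for buildSet (hypothetical examples): a bare string
# becomes a one-element set, None becomes an empty set, and any other
# iterable is passed straight to set().
if __name__ == '__main__':
    assert buildSet('spam') == set(['spam'])
    assert buildSet() == set()
    assert buildSet(['spam', 'eggs', 'spam']) == set(['spam', 'eggs'])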
def get_tags(self, names=[], tagspaces=[], operation='union', detailed=False):
    """ Get tags with the given names from the given tagspaces.

        'operation' is the union or intersection of all tags on names.
        If detailed, return a set of {tag: set([(tagspace, name), ...])},
        otherwise return a set of tags. """
    assert type(names) in (list, tuple, set)
    tagspaces = tagspaces or self.tagspaces
    seed_set = True
    all_tags = set()
    tagged_names = {}
    for tagspace, name, tags in self.walk_tagged_names(names=names,
                                                       tagspaces=tagspaces):
        for tag in tags:
            tagged_names.setdefault(tag, set()).add((tagspace, name))
        if operation == 'intersection':
            if seed_set:
                seed_set = False
                all_tags.update(tags)
            else:
                all_tags.intersection_update(tags)
                if not all_tags:
                    return detailed and {} or set()
        else:
            all_tags.update(tags)
    if detailed:
        out_tags = {}
        for tag in all_tags:
            out_tags[tag] = tagged_names[tag]
        return out_tags
    else:
        return all_tags
def _add_items(self, source_col_name, target_col_name, *objs):
    # join_table: name of the m2m link table
    # source_col_name: the PK colname in join_table for the source object
    # target_col_name: the PK colname in join_table for the target object
    # *objs - objects to add. Either object instances, or primary keys of object instances.

    # If there aren't any objects, there is nothing to do.
    if objs:
        from django.db.models.base import Model
        # Check that all the objects are of the right type
        new_ids = set()
        for obj in objs:
            if isinstance(obj, self.model):
                new_ids.add(obj._get_pk_val())
            elif isinstance(obj, Model):
                raise TypeError, "'%s' instance expected" % self.model._meta.object_name
            else:
                new_ids.add(obj)
        # Add the newly created or already existing objects to the join table.
        # First find out which items are already added, to avoid adding them twice
        cursor = connection.cursor()
        cursor.execute("SELECT %s FROM %s WHERE %s = %%s AND %s IN (%s)" % \
            (target_col_name, self.join_table, source_col_name,
             target_col_name, ",".join(['%s'] * len(new_ids))),
            [self._pk_val] + list(new_ids))
        existing_ids = set([row[0] for row in cursor.fetchall()])

        # Add the ones that aren't there already
        for obj_id in (new_ids - existing_ids):
            cursor.execute("INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % \
                (self.join_table, source_col_name, target_col_name),
                [self._pk_val, obj_id])
        transaction.commit_unless_managed()
def run(self):
    self.init(database = True, write = False)

    upgradable_pkgs = pisi.api.list_upgradable()

    component = ctx.get_option('component')
    if component:
        #FIXME: PiSi api is insufficient to do this
        from sets import Set as set
        component_pkgs = self.componentdb.get_union_packages(component, walk=True)
        upgradable_pkgs = list(set(upgradable_pkgs) & set(component_pkgs))

    if not upgradable_pkgs:
        ctx.ui.info(_('No packages to upgrade.'))

    upgradable_pkgs.sort()

    if self.options.install_info:
        ctx.ui.info(_('Package Name |St| Version| Rel.| Build| Distro| Date'))
        print '========================================================================'
    for pkg in upgradable_pkgs:
        package = self.installdb.get_package(pkg)
        inst_info = self.installdb.get_info(pkg)
        if self.options.long:
            ctx.ui.info(package)
            print inst_info
        elif self.options.install_info:
            ctx.ui.info('%-15s | %s ' % (package.name, inst_info.one_liner()))
        else:
            ctx.ui.info('%15s - %s ' % (package.name, package.summary))
def __init__(self, G):
    """
    Initialize from a given graph instance.

    The graph G should have G[v][w] equal to a collection (list, set, etc)
    of the labels on edges from v to w; this allows us to represent
    multigraphs with differing labels on their multiedges.

    Data stored in fields of this instance:
    - self.nrg is a transformed unlabeled graph in which paths represent
      nonrepetitive paths in G
    - self.labels is a dictionary mapping vertices to their label sets
    """
    self.labels = {}
    for v in G:
        self.labels[v] = set()
        for w in G[v]:
            self.labels[w] = set()
    for v in G:
        for w in G[v]:
            self.labels[v].update(G[v][w])
            self.labels[w].update(G[v][w])
    self.nrg = {}
    for v in self:
        self._gadget(v, self.labels[v])
    for v in G:
        for w in G[v]:
            for L in G[v][w]:
                self.nrg[v, L, False].add((w, L, True))
def update(self, *args, **kwargs):
    get_dependant_stack().push(None)
    try:
        passed_keys = set(kwargs)
        if args:
            passed_keys.update(set(args[0]))
        keys = set(self._key_subcells).intersection(passed_keys)
        originals = {}
        missing = set()
        for key in keys:
            if self.has_key(key):
                originals[key] = self[key]
            else:
                missing.add(key)
    finally:
        get_dependant_stack().pop(None)

    dict_update(self, *args, **kwargs)
    self.changed()

    # Find all of those that were originally here and have changed.
    get_dependant_stack().push(None)
    try:
        changed = set()
        for key, v in originals.items():
            if v != self[key]:
                changed.add(key)
    finally:
        get_dependant_stack().pop(None)

    for key in changed | missing:
        self._key_changed(key)
def getRootModules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.
    """
    modules = []
    if ip.db.has_key('rootmodules'):
        return ip.db['rootmodules']
    t = time()
    store = False
    for path in sys.path:
        modules += moduleList(path)
        if time() - t >= TIMEOUT_STORAGE and not store:
            store = True
            print "\nCaching the list of root modules, please wait!"
            print "(This will only be done once - type '%rehashx' to " + \
                  "reset cache!)"
            print
        if time() - t > TIMEOUT_GIVEUP:
            print "This is taking too long, we give up."
            print
            ip.db['rootmodules'] = []
            return []

    modules += sys.builtin_module_names

    modules = list(set(modules))
    if '__init__' in modules:
        modules.remove('__init__')
    modules = list(set(modules))
    if store:
        ip.db['rootmodules'] = modules
    return modules
def get_permissions(self):
    permissions = set()
    for user, user_group, group, group_permission, permission in self.sandbox.recall(
            user_class + user_group_class + group_class +
            group_permission_class + permission_class,
            lambda u, ug, g, gp, p: u.user_id == self.user_id):
        permissions = permissions | set([permission])
    return permissions
def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True):
    dircont = self.listdir()
    dircont.sort()

    try:
        lst = set(self.children.keys())
        if remove:
            for x in lst - set(dircont):
                del self.children[x]
    except:
        self.children = {}

    for name in dircont:
        npats = accept(name, pats)
        if npats and npats[0]:
            accepted = [] in npats[0]

            node = self.make_node([name])

            isdir = os.path.isdir(node.abspath())
            if accepted:
                if isdir:
                    if dir:
                        yield node
                else:
                    if src:
                        yield node

            if getattr(node, 'cache_isdir', None) or isdir:
                node.cache_isdir = True
                if maxdepth:
                    for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1,
                                           pats=npats, dir=dir, src=src):
                        yield k
    raise StopIteration
def _gadget(self, v, labels):
    """Create nonrepetitivity gadget for vertex v and given label set."""
    labels = list(labels)
    for L in labels:
        self.nrg.setdefault((v, L, True), set())
        self.nrg.setdefault((v, L, False), set())
    if len(labels) == 1:
        return
    groups = []
    n = len(labels)
    while n > 0:
        if n % 3 == 0:
            grouplen = 3
        else:
            grouplen = 2
        group = labels[n - grouplen:n]
        for L1 in group:
            for L2 in group:
                if L1 != L2:
                    self.nrg[v, L1, True].add((v, L2, False))
        if len(labels) > 3:
            groups.append(object())
            self.nrg[v, groups[-1], False] = set([(v, L, False) for L in group])
            for L in group:
                self.nrg[v, L, True].add((v, groups[-1], True))
        n -= grouplen
    if len(groups) > 1:
        self._gadget(v, groups)
def old_init(self):
    try:
        ads = set()
    except NameError:
        # for Python 2.3
        from sets import Set as set
        ads = set()
    positions = []
    all_comments = self.ballot.comments.all().select_related('ad')
    for p in self.ballot.positions.all().select_related('ad'):
        po = create_position_object(self.ballot, p, all_comments)
        #if not self.ballot_active:
        #    if 'is_old_ad' in po:
        #        del po['is_old_ad']
        ads.add(str(p.ad))
        positions.append(po)
    for c in all_comments:
        if (str(c.ad) not in ads) and c.ad.is_current_ad():
            positions.append({'has_text': True,
                              'comment_text': c.text,
                              'comment_date': c.date,
                              'comment_revision': str(c.revision),
                              'ad_name': str(c.ad),
                              'ad_username': c.ad.login_name,
                              'position': 'No Record',
                              'is_old_ad': False})
            ads.add(str(c.ad))
    if self.ballot_active:
        for ad in IESGLogin.active_iesg():
            if str(ad) not in ads:
                positions.append(dict(ad_name=str(ad),
                                      ad_username=ad.login_name,
                                      position="No Record"))
    self._positions = positions
def walk_tagged_names(self, names, tags, predicate):
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    args = [self.tagspace]
    sql = 'SELECT DISTINCT name, tag FROM tags WHERE tagspace=%s'
    if names:
        sql += ' AND name IN (' + ', '.join(['%s' for n in names]) + ')'
        args += names
    if tags:
        sql += ' AND name in (SELECT name FROM tags WHERE tag in (' + \
               ', '.join(['%s' for t in tags]) + '))'
        args += tags
    sql += " ORDER BY name"
    cursor.execute(sql, args)
    tags = set(tags)
    current_name = None
    name_tags = set()
    for name, tag in cursor:
        if current_name != name:
            if current_name is not None:
                if predicate(current_name, name_tags):
                    yield (current_name, name_tags)
            name_tags = set([tag])
            current_name = name
        else:
            name_tags.add(tag)
    if current_name is not None and predicate(current_name, name_tags):
        yield (current_name, name_tags)
import __builtin__
import __main__
import glob
import keyword
import os
import re
import shlex
import sys
import IPython.rlineimpl as readline
import types

# Python 2.4 offers sets as a builtin
try:
    set([1, 2])
except NameError:
    from sets import Set as set

from IPython.genutils import debugx

__all__ = ['Completer', 'IPCompleter']


def get_class_members(cls):
    ret = dir(cls)
    if hasattr(cls, '__bases__'):
        for base in cls.__bases__:
            ret.extend(get_class_members(base))
    return ret
def __call__(self, req, form):
    """ Perform a search. """
    argd = wash_search_urlargd(form)

    _ = gettext_set_language(argd['ln'])

    if req.method == 'POST':
        raise apache.SERVER_RETURN, apache.HTTP_METHOD_NOT_ALLOWED

    uid = getUid(req)
    user_info = collect_user_info(req)
    if uid == -1:
        return page_not_authorized(req, "../",
            text=_("You are not authorized to view this area."),
            navmenuid='search')
    elif uid > 0:
        pref = get_user_preferences(uid)
        try:
            if not form.has_key('rg'):
                # fetch user rg preference only if not overridden via URL
                argd['rg'] = int(pref['websearch_group_records'])
        except (KeyError, ValueError):
            pass

    if argd['rg'] > CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS and \
            acc_authorize_action(req, 'runbibedit')[0] != 0:
        argd['rg'] = CFG_WEBSEARCH_MAX_RECORDS_IN_GROUPS

    involved_collections = set()
    involved_collections.update(argd['c'])
    involved_collections.add(argd['cc'])

    if argd['id'] > 0:
        argd['recid'] = argd['id']
    if argd['idb'] > 0:
        argd['recidb'] = argd['idb']
    if argd['sysno']:
        tmp_recid = find_record_from_sysno(argd['sysno'])
        if tmp_recid:
            argd['recid'] = tmp_recid
    if argd['sysnb']:
        tmp_recid = find_record_from_sysno(argd['sysnb'])
        if tmp_recid:
            argd['recidb'] = tmp_recid

    if argd['recid'] > 0:
        if argd['recidb'] > argd['recid']:
            # Hack to check if among the restricted collections
            # at least a record of the range is there and
            # then if the user is not authorized for that
            # collection.
            recids = intbitset(xrange(argd['recid'], argd['recidb']))
            restricted_collection_cache.recreate_cache_if_needed()
            for collname in restricted_collection_cache.cache:
                (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL,
                                                             collection=collname)
                if auth_code and user_info['email'] == 'guest':
                    coll_recids = get_collection(collname).reclist
                    if coll_recids & recids:
                        cookie = mail_cookie_create_authorize_action(
                            VIEWRESTRCOLL, {'collection': collname})
                        target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                            make_canonical_urlargd({'action': cookie,
                                                    'ln': argd['ln'],
                                                    'referer': CFG_SITE_SECURE_URL + req.unparsed_uri}, {})
                        return redirect_to_url(req, target, norobot=True)
                elif auth_code:
                    return page_not_authorized(req, "../",
                                               text=auth_msg,
                                               navmenuid='search')
        else:
            involved_collections.add(
                guess_primary_collection_of_a_record(argd['recid']))

    # If any of the collections requires authentication, redirect
    # to the authentication form.
    for coll in involved_collections:
        if collection_restricted_p(coll):
            (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL,
                                                         collection=coll)
            if auth_code and user_info['email'] == 'guest':
                cookie = mail_cookie_create_authorize_action(
                    VIEWRESTRCOLL, {'collection': coll})
                target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
                    make_canonical_urlargd({'action': cookie,
                                            'ln': argd['ln'],
                                            'referer': CFG_SITE_SECURE_URL + req.unparsed_uri}, {})
                return redirect_to_url(req, target, norobot=True)
            elif auth_code:
                return page_not_authorized(req, "../",
                                           text=auth_msg,
                                           navmenuid='search')

    # check if the user has rights to set a high wildcard limit
    # if not, reduce the limit set by user, with the default one
    if CFG_WEBSEARCH_WILDCARD_LIMIT > 0 and \
            (argd['wl'] > CFG_WEBSEARCH_WILDCARD_LIMIT or argd['wl'] == 0):
        auth_code, auth_message = acc_authorize_action(req, 'runbibedit')
        if auth_code != 0:
            argd['wl'] = CFG_WEBSEARCH_WILDCARD_LIMIT

    # only superadmins can use verbose parameter for obtaining debug information
    if not isUserSuperAdmin(user_info):
        argd['verbose'] = 0

    # Keep all the arguments, they might be reused in the
    # search_engine itself to derivate other queries
    req.argd = argd

    # mod_python does not like to return [] in case when of=id:
    out = perform_request_search(req, **argd)
    if isinstance(out, intbitset):
        return out.fastdump()
    elif out == []:
        return str(out)
    else:
        return out
def __init__(self, db, const):
    self.db = db
    self.const = const
    self.name = self.get_name()
    self.depends = set()
    self.depends_on = set()
def __init__(self, db, const):
    self.db = db
    self.const = const
    self.cts = db.genoo.TypeSystem(db)
    self.depends = set()
    self.depends_on = set()
            if verbose:
                sys.stderr.write("error: adding stream: %s %s %s.%s.%s.%s\n" % (
                    stream[0], stream[1], stream[2], stream[3], stream[4], stream[5]))
        else:
            if verbose:
                sys.stderr.write("adding stream: %s %s %s.%s.%s.%s\n" % (
                    stream[0], stream[1], stream[2], stream[3], stream[4], stream[5]))

    input = seiscomp3.IO.RecordInput(rs, seiscomp3.Core.Array.INT,
                                     seiscomp3.Core.Record.SAVE_RAW)

    filePool = dict()
    f = None
    accessedFiles = set()
    try:
        for rec in input:
            if stdout:
                out.write(rec.raw().str())
                continue

            dir, file = archive.location(rec.startTime(), rec.networkCode(),
                                         rec.stationCode(), rec.locationCode(),
                                         rec.channelCode())
            file = dir + file

            if test == False:
                try:
                    f = filePool[file]
                except:
def paginate(context, window=DEFAULT_WINDOW, hashtag=''):
    """
    Renders the ``pagination/pagination.html`` template, resulting in a
    Digg-like display of the available pages, given the current page.  If there
    are too many pages to be displayed before and after the current page, then
    ellipses will be used to indicate the undisplayed gap between page numbers.

    Requires one argument, ``context``, which should be a dictionary-like data
    structure and must contain the following keys:

    ``paginator``
        A ``Paginator`` or ``QuerySetPaginator`` object.

    ``page_obj``
        This should be the result of calling the page method on the
        aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given
        the current page.

    This same ``context`` dictionary-like data structure may also include:

    ``getvars``
        A dictionary of all of the **GET** parameters in the current request.
        This is useful to maintain certain types of state, even when requesting
        a different page.
    """
    try:
        paginator = context['paginator']
        page_obj = context['page_obj']
        page_range = paginator.page_range
        # Calculate the record range in the current page for display.
        records = {'first': 1 + (page_obj.number - 1) * paginator.per_page}
        records['last'] = records['first'] + paginator.per_page - 1
        if records['last'] + paginator.orphans >= paginator.count:
            records['last'] = paginator.count
        # First and last are simply the first *n* pages and the last *n*
        # pages, where *n* is the current window size.
        # ... unless the start and end parameters have been set
        if 'paginate_start' in context:
            start = context['paginate_start']
        else:
            start = window
        if 'paginate_end' in context:
            end = context['paginate_end']
        else:
            end = window
        first = set(page_range[:start])
        last = set(page_range[-end:])
        # Now we look around our current page, making sure that we don't wrap
        # around.
        if 'paginate_before' in context:
            before = context['paginate_before']
        else:
            before = window
        current_start = page_obj.number - 1 - before
        if current_start < 0:
            current_start = 0
        if 'paginate_after' in context:
            after = context['paginate_after']
        else:
            after = window
        current_end = page_obj.number - 1 + after
        if current_end < 0:
            current_end = 0
        current = set(page_range[current_start:current_end])
        pages = []
        # If there's no overlap between the first set of pages and the current
        # set of pages, then there's a possible need for elision.
        if len(first.intersection(current)) == 0:
            first_list = list(first)
            first_list.sort()
            second_list = list(current)
            second_list.sort()
            pages.extend(first_list)
            if first_list:
                diff = second_list[0] - first_list[-1]
                # If there is a gap of two, between the last page of the first
                # set and the first page of the current set, then we're missing
                # a page.
                if diff == 2:
                    pages.append(second_list[0] - 1)
                # If the difference is just one, then there's nothing to be
                # done, as the pages need no elision and are correct.
                elif diff == 1:
                    pass
                # Otherwise, there's a bigger gap which needs to be signaled
                # for elision, by pushing a None value to the page list.
                else:
                    pages.append(None)
            pages.extend(second_list)
        else:
            unioned = list(first.union(current))
            unioned.sort()
            pages.extend(unioned)
        # If there's no overlap between the current set of pages and the last
        # set of pages, then there's a possible need for elision.
        if len(current.intersection(last)) == 0:
            second_list = list(last)
            second_list.sort()
            diff = second_list[0] - pages[-1]
            # If there is a gap of two, between the last page of the current
            # set and the first page of the last set, then we're missing a
            # page.
            if diff == 2:
                pages.append(second_list[0] - 1)
            # If the difference is just one, then there's nothing to be done,
            # as the pages need no elision and are correct.
            elif diff == 1:
                pass
            # Otherwise, there's a bigger gap which needs to be signaled for
            # elision, by pushing a None value to the page list.
            else:
                pages.append(None)
            pages.extend(second_list)
        else:
            differenced = list(last.difference(current))
            differenced.sort()
            pages.extend(differenced)
        to_return = {
            'MEDIA_URL': settings.MEDIA_URL,
            'pages': pages,
            'records': records,
            'page_obj': page_obj,
            'paginator': paginator,
            'hashtag': hashtag,
            'is_paginated': paginator.count > paginator.per_page,
        }
        if 'request' in context:
            getvars = context['request'].GET.copy()
            if 'page' in getvars:
                del getvars['page']
            if len(getvars.keys()) > 0:
                to_return['getvars'] = "&%s" % getvars.urlencode()
            else:
                to_return['getvars'] = ''
        return to_return
    except (KeyError, AttributeError):
        return {}
def attr_matches(self, text):
    """Compute matches when text contains a dot.

    Assuming the text is of the form NAME.NAME....[NAME], and is
    evaluatable in self.namespace or self.global_namespace, it will be
    evaluated and its attributes (as revealed by dir()) are used as
    possible completions.  (For class instances, class members are
    also considered.)

    WARNING: this can still invoke arbitrary C code, if an object
    with a __getattr__ hook is evaluated.
    """
    import re

    # Another option, seems to work great. Catches things like ''.<tab>
    m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)

    if not m:
        return []

    expr, attr = m.group(1, 3)
    try:
        object = eval(expr, self.namespace)
    except:
        object = eval(expr, self.global_namespace)

    # Start building the attribute list via dir(), and then complete it
    # with a few extra special-purpose calls.
    words = dir(object)

    if hasattr(object, '__class__'):
        words.append('__class__')
        words.extend(get_class_members(object.__class__))

    # this is the 'dir' function for objects with Enthought's traits
    if hasattr(object, 'trait_names'):
        try:
            words.extend(object.trait_names())
            # eliminate possible duplicates, as some traits may also
            # appear as normal attributes in the dir() call.
            words = set(words)
        except TypeError:
            # This will happen if `object` is a class and not an instance.
            pass

    # Support for PyCrust-style _getAttributeNames magic method.
    if hasattr(object, '_getAttributeNames'):
        try:
            words.extend(object._getAttributeNames())
            # Eliminate duplicates.
            words = set(words)
        except TypeError:
            # `object` is a class and not an instance.  Ignore
            # this error.
            pass

    # filter out non-string attributes which may be stuffed by dir() calls
    # and poor coding in third-party modules
    words = [w for w in words
             if isinstance(w, basestring) and w != "__builtins__"]

    # Build match list to return
    n = len(attr)
    return ["%s.%s" % (expr, w) for w in words if w[:n] == attr]
def convert_set(s):
    return set(s.split(","))
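
# Hedged usage sketch for convert_set (hypothetical input): duplicates collapse
# because the comma-separated values are fed into a set.
if __name__ == '__main__':
    assert convert_set("a,b,a") == set(["a", "b"])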
               'regular'    : 400,
               'book'       : 400,
               'medium'     : 500,
               'roman'      : 500,
               'semibold'   : 600,
               'demibold'   : 600,
               'demi'       : 600,
               'bold'       : 700,
               'heavy'      : 800,
               'extra bold' : 800,
               'black'      : 900}

font_family_aliases = set(['serif', 'sans-serif', 'sans serif', 'cursive',
                           'fantasy', 'monospace', 'sans'])

#  OS Font paths
MSFolders = \
    r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'

MSFontDirectories = [
    r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
    r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']

X11FontDirectories = [
    # an old standard installation point
    "/usr/X11R6/lib/X11/fonts/TTF/",
def action_distinct_count(self, attribute_name=None):
    values = filter(lambda v: v is not None,
                    self.get_queryset_values(attribute_name))
    return len(set(values))
def __init__(self, t1file, glyphnames, charcodes):
    pdfwriter.PDFobject.__init__(self, "fontfile", t1file.name)
    self.t1file = t1file
    self.glyphnames = set(glyphnames)
    self.charcodes = set(charcodes)
def keys(self):
    """Return all keys in the comment."""
    return self and list(set([k.lower() for k, v in self]))
def create_xref(self):
    # this code probably needs cleanup
    depgraph = {}
    importedby = {}
    for name, value in self._depgraph.items():
        depgraph[name] = list(value)
        for needs in value:
            importedby.setdefault(needs, set()).add(name)

    names = self._types.keys()
    names.sort()

    fd, htmlfile = tempfile.mkstemp(".html")
    ofi = open(htmlfile, "w")
    os.close(fd)
    print >> ofi, "<html><title>py2exe cross reference for %s</title><body>" % sys.argv[0]
    print >> ofi, "<h1>py2exe cross reference for %s</h1>" % sys.argv[0]

    for name in names:
        if self._types[name] in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
            print >> ofi, '<a name="%s"><b><tt>%s</tt></b></a>' % (name, name)
            if name == "__main__":
                for fname in self._scripts:
                    path = urllib.pathname2url(os.path.abspath(fname))
                    print >> ofi, '<a target="code" href="%s" type="text/plain"><tt>%s</tt></a> ' \
                          % (path, fname)
                print >> ofi, '<br>imports:'
            else:
                fname = urllib.pathname2url(self.modules[name].__file__)
                print >> ofi, '<a target="code" href="%s" type="text/plain"><tt>%s</tt></a><br>imports:' \
                      % (fname, self.modules[name].__file__)
        else:
            fname = self.modules[name].__file__
            if fname:
                print >> ofi, '<a name="%s"><b><tt>%s</tt></b></a> <tt>%s</tt><br>imports:' \
                      % (name, name, fname)
            else:
                print >> ofi, '<a name="%s"><b><tt>%s</tt></b></a> <i>%s</i><br>imports:' \
                      % (name, name, TYPES[self._types[name]])

        if name in depgraph:
            needs = depgraph[name]
            for n in needs:
                print >> ofi, '<a href="#%s"><tt>%s</tt></a> ' % (n, n)
        print >> ofi, "<br>\n"

        print >> ofi, 'imported by:'
        if name in importedby:
            for i in importedby[name]:
                print >> ofi, '<a href="#%s"><tt>%s</tt></a> ' % (i, i)
        print >> ofi, "<br>\n"

        print >> ofi, "<br>\n"

    print >> ofi, "</body></html>"
    ofi.close()
    os.startfile(htmlfile)
    # how long does it take to start the browser?
    import threading
    threading.Timer(5, os.remove, args=[htmlfile])
def _f(data, all_data=None):
    data = set(data)
    if data - _get_choices_keys(choices):
        raise ValidationError, _(
            u'Select a valid choice. That choice is not one of the available choices.')
def __init__(self, *args, **kw):
    self._depgraph = {}
    self._types = {}
    self._last_caller = None
    self._scripts = set()
    Base.__init__(self, *args, **kw)
# (at your option) any later version.
#
# PyX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyX; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

from pyx import bbox, canvasitem, deco, path, pswriter, pdfwriter, trafo, unit
import t1file

try:
    set()
except NameError:
    # Python 2.3
    from sets import Set as set

##############################################################################
# PS resources
##############################################################################

class PST1file(pswriter.PSresource):
    """ PostScript font definition included in the prolog """

    def __init__(self, t1file, glyphnames, charcodes):
        """ include type 1 font t1file stripped to the given glyphnames"""
        self.type = "t1file"
        self.t1file = t1file
def delete_view(self, request, object_id, extra_context=None):
    "The 'delete' admin view for this model."
    opts = self.model._meta
    app_label = opts.app_label

    try:
        obj = self.queryset(request).get(pk=object_id)
    except self.model.DoesNotExist:
        # Don't raise Http404 just yet, because we haven't checked
        # permissions yet. We don't want an unauthenticated user to be able
        # to determine whether a given object exists.
        obj = None

    if not self.has_delete_permission(request, obj):
        raise PermissionDenied

    if obj is None:
        raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
            'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})

    # Populate deleted_objects, a data structure of all related objects that
    # will also be deleted.
    deleted_objects = [mark_safe(u'%s: <a href="../../%s/">%s</a>' %
                                 (escape(force_unicode(capfirst(opts.verbose_name))),
                                  quote(object_id), escape(obj))), []]
    perms_needed = set()
    get_deleted_objects(deleted_objects, perms_needed, request.user, obj, opts, 1,
                        self.admin_site)

    if request.POST:  # The user has already confirmed the deletion.
        if perms_needed:
            raise PermissionDenied
        obj_display = force_unicode(obj)
        self.log_deletion(request, obj, obj_display)
        obj.delete()

        self.message_user(request,
            _('The %(name)s "%(obj)s" was deleted successfully.') % {
                'name': force_unicode(opts.verbose_name),
                'obj': force_unicode(obj_display)})

        if not self.has_change_permission(request, None):
            return HttpResponseRedirect("../../../../")
        return HttpResponseRedirect("../../")

    context = {
        "title": _("Are you sure?"),
        "object_name": force_unicode(opts.verbose_name),
        "object": obj,
        "deleted_objects": deleted_objects,
        "perms_lacking": perms_needed,
        "opts": opts,
        "root_path": self.admin_site.root_path,
        "app_label": app_label,
    }
    context.update(extra_context or {})
    return render_to_response(self.delete_confirmation_template or [
        "admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
        "admin/%s/delete_confirmation.html" % app_label,
        "admin/delete_confirmation.html"
    ], context, context_instance=template.RequestContext(request))
def import_module(self, partnam, fqname, parent):
    r = Base.import_module(self, partnam, fqname, parent)
    if r is not None and self._last_caller:
        self._depgraph.setdefault(self._last_caller.__name__, set()).add(r.__name__)
    return r
def submit(self, func, args=(), depfuncs=(), modules=(),
           callback=None, callbackargs=(), group='default', globals=None):
    """Submits function to the execution queue

        func - function to be executed
        args - tuple with arguments of the 'func'
        depfuncs - tuple with functions which might be called from 'func'
        modules - tuple with module names to import
        callback - callback function which will be called with argument
                list equal to callbackargs+(result,)
                as soon as calculation is done
        callbackargs - additional arguments for callback function
        group - job group, is used when wait(group) is called to wait for
                jobs in a given group to finish
        globals - dictionary from which all modules, functions and classes
                will be imported, for instance: globals=globals()
    """

    # perform some checks for frequent mistakes
    if self._exiting:
        raise DestroyedServerError("Cannot submit jobs: server"
                                   " instance has been destroyed")

    if not isinstance(args, tuple):
        raise TypeError("args argument must be a tuple")

    if not isinstance(depfuncs, tuple):
        raise TypeError("depfuncs argument must be a tuple")

    if not isinstance(modules, tuple):
        raise TypeError("modules argument must be a tuple")

    if not isinstance(callbackargs, tuple):
        raise TypeError("callbackargs argument must be a tuple")

    if globals is not None and not isinstance(globals, dict):
        raise TypeError("globals argument must be a dictionary")

    for module in modules:
        if not isinstance(module, str):
            raise TypeError("modules argument must be a list of strings")

    tid = self.__gentid()

    other_type = types.FunctionType if six.PY3 else types.ClassType

    if globals:
        modules += tuple(self.__find_modules("", globals))
        modules = tuple(set(modules))
        self.logger.debug("Task %i will autoimport next modules: %s" %
                          (tid, str(modules)))
        for object1 in globals.values():
            if isinstance(object1, types.FunctionType) \
                    or isinstance(object1, other_type):
                depfuncs += (object1, )

    task = _Task(self, tid, callback, callbackargs, group)

    self.__waittasks_lock.acquire()
    self.__waittasks.append(task)
    self.__waittasks_lock.release()

    # if the function is a method of a class add self to the arguments list
    if isinstance(func, types.MethodType):
        func_self = func.__self__ if six.PY3 else func.im_self
        if func_self is not None:
            args = (func_self, ) + args

    # if there is an instance of a user defined class in the arguments add
    # the whole class to dependencies
    for arg in args:
        # Checks for both classic or new class instances
        if (six.PY2 and isinstance(arg, types.InstanceType)) \
                or str(type(arg))[:6] == "<class":
            # in PY3, all instances are <class... so skip the builtins
            if getattr(inspect.getmodule(arg), '__name__', None) \
                    in ['builtins', '__builtin__', None]:
                pass
            # do not include source for imported modules
            elif ppc.is_not_imported(arg, modules):
                depfuncs += tuple(ppc.get_class_hierarchy(arg.__class__))

    # if there is a function in the arguments add this
    # function to dependencies
    for arg in args:
        if isinstance(arg, types.FunctionType):
            depfuncs += (arg, )

    sfunc = self.__dumpsfunc((func, ) + depfuncs, modules)
    sargs = pickle.dumps(args, self.__pickle_proto)

    self.__queue_lock.acquire()
    self.__queue.append((task, sfunc, sargs))
    self.__queue_lock.release()

    self.logger.debug("Task %i submited, function='%s'" %
                      (tid, getname(func)))
    self.__scheduler()
    return task
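
# Hedged usage sketch for submit(), assuming the surrounding Server class is a
# Parallel Python style job server; everything below is hypothetical and not
# part of the original module:
#
#     job_server = Server(ncpus=2)
#     job = job_server.submit(sum, (range(10),))   # args must be a tuple
#     print job()                                  # blocks until the worker returns 45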
def generictest(testFile):
    func_name = __name__ = __doc__ = id = 'test_sparql.' + \
        os.path.splitext(testFile)[0][8:].translate(maketrans('-/', '__'))
    store = plugin.get(STORE, Store)()
    bootStrapStore(store)
    store.commit()
    prefix = testFile.split('.rq')[-1]
    manifestPath = '/'.join(testFile.split('/')[:-1] + ['manifest.n3'])
    manifestPath2 = '/'.join(testFile.split('/')[:-1] + ['manifest.ttl'])
    queryFileName = testFile.split('/')[-1]
    store = plugin.get(STORE, Store)()
    store.open(configString, create=False)
    assert len(store) == 0
    manifestG = ConjunctiveGraph(store)
    if not os.path.exists(manifestPath):
        assert os.path.exists(manifestPath2)
        manifestPath = manifestPath2
    manifestG.default_context.parse(open(manifestPath), publicID=TEST_BASE,
                                    format='n3')
    manifestData = manifestG.query(
        MANIFEST_QUERY,
        processor='sparql',
        initBindings={'query': TEST_BASE[queryFileName]},
        initNs=manifestNS,
        DEBUG=False)
    store.rollback()
    store.close()
    for source, testCaseName, testCaseComment, expectedRT in manifestData:
        if expectedRT:
            expectedRT = '/'.join(testFile.split('/')[:-1] +
                                  [expectedRT.replace(TEST_BASE, '')])
        if source:
            source = '/'.join(testFile.split('/')[:-1] +
                              [source.replace(TEST_BASE, '')])
        testCaseName = testCaseComment and testCaseComment or testCaseName
        # log.debug("## Source: %s ##" % source)
        # log.debug("## Test: %s ##" % testCaseName)
        # log.debug("## Result: %s ##" % expectedRT)

        # Expected results
        if expectedRT:
            store = plugin.get(STORE, Store)()
            store.open(configString, create=False)
            resultG = ConjunctiveGraph(store).default_context
            log.debug("###" * 10)
            log.debug("parsing: %s" % open(expectedRT).read())
            log.debug("###" * 10)
            assert len(store) == 0
            # log.debug("## Parsing (%s) ##" % (expectedRT))
            if not trialAndErrorRTParse(resultG, expectedRT, DEBUG):
                log.debug("Unexpected result format (for %s), skipping" %
                          (expectedRT))
                store.rollback()
                store.close()
                continue
            log.debug("## Done .. ##")
            rtVars = [rtVar for rtVar in
                      resultG.objects(None, RESULT_NS.resultVariable)]
            bindings = []
            resultSetNode = resultG.value(predicate=RESULT_NS.value,
                                          object=RESULT_NS.ResultSet)
            for solutionNode in resultG.objects(resultSetNode,
                                                RESULT_NS.solution):
                bindingDict = dict([(key, None) for key in rtVars])
                for bindingNode in resultG.objects(solutionNode,
                                                   RESULT_NS.binding):
                    value = resultG.value(subject=bindingNode,
                                          predicate=RESULT_NS.value)
                    name = resultG.value(subject=bindingNode,
                                         predicate=RESULT_NS.variable)
                    bindingDict[name] = value
                rbinds = [bindingDict[vName] for vName in rtVars]
                # print("Rbinds", rbinds)
                if len(rbinds) > 1 and (isinstance(rbinds, list)
                                        or isinstance(rbinds, tuple)):
                    bindings.append(frozenset(rbinds))
                elif len(rbinds) == 1 and (isinstance(rbinds, list)
                                           or isinstance(rbinds, tuple)):
                    bindings.append(rbinds[0])
                else:
                    bindings.append(rbinds)
                # bindings.append(tuple([bindingDict[vName] for vName in rtVars]))
            log.debug(open(expectedRT).read())
            store.rollback()
            store.close()

        if testFile in tests2Skip.keys():
            log.debug("Skipping test (%s) %s\n" % (testFile, tests2Skip[testFile]))
            raise SkipTest("Skipping test (%s) %s\n" % (testFile, tests2Skip[testFile]))

        query = open(testFile).read()
        log.debug("### %s (%s) ###" % (testCaseName, testFile))
        log.debug(query)
        p = parse(query)  # ,DEBUG_PARSE)
        log.debug(p)
        if EVALUATE and source:
            log.debug("### Source Graph: ###")
            log.debug(open(source).read())
            store = plugin.get(STORE, Store)()
            store.open(configString, create=False)
            g = ConjunctiveGraph(store)
            try:
                g.parse(open(source), format='n3')
            except:
                log.debug("Unexpected data format (for %s), skipping" % (source))
                store.rollback()
                store.close()
                continue
            rt = g.query(query, processor='sparql', DEBUG=False)
            if expectedRT:
                try:
                    result = rt.result
                except AttributeError:
                    result = rt
                if isinstance(result, Graph):
                    resgraph = open(graphtests[testFile]).read()
                    store = plugin.get(STORE, Store)()
                    store.open(configString, create=False)
                    g = ConjunctiveGraph(store)
                    g.parse(data=resgraph, format="n3")
                    assert result == g, \
                        "### Test Failed: ###\n\nB:\n%s\n\nR:\n%s\n\n" % \
                        (g.serialize(format="n3"), result.serialize(format="n3"))
                else:
                    # result = [r[0] for r in result if isinstance(r, (tuple, list))]
                    def stab(r):
                        if isinstance(r, (tuple, list)):
                            return frozenset(r)
                        else:
                            return r
                    results = set([stab(r) for r in result])
                    assert set(bindings).difference(results) == set([]) \
                        or set(bindings) == results, \
                        "### Test Failed: ###\n\nB:\n%s\n\nR:\n%s\n\n" % \
                        (set(bindings), results)
                log.debug("### Test Passed: ###")
            store.rollback()
def sql_create_model(self, model, style, known_models=set()):
    """
    Returns the SQL required to create a single model, as a tuple of:
        (list_of_sql, pending_references_dict)
    """
    from django.db import models

    opts = model._meta
    if not opts.managed or opts.proxy:
        return [], {}
    final_output = []
    table_output = []
    pending_references = {}
    qn = self.connection.ops.quote_name
    for f in opts.local_fields:
        col_type = f.db_type()
        tablespace = f.db_tablespace or opts.db_tablespace
        if col_type is None:
            # Skip ManyToManyFields, because they're not represented as
            # database columns in this table.
            continue
        # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
        field_output = [style.SQL_FIELD(qn(f.column)),
                        style.SQL_COLTYPE(col_type)]
        if not f.null:
            field_output.append(style.SQL_KEYWORD('NOT NULL'))
        if f.primary_key:
            field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
        elif f.unique:
            field_output.append(style.SQL_KEYWORD('UNIQUE'))
        if tablespace and f.unique:
            # We must specify the index tablespace inline, because we
            # won't be generating a CREATE INDEX statement for this field.
            field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
        if f.rel:
            ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
            if pending:
                pr = pending_references.setdefault(f.rel.to, []).append((model, f))
            else:
                field_output.extend(ref_output)
        table_output.append(' '.join(field_output))
    if opts.order_with_respect_to:
        table_output.append(style.SQL_FIELD(qn('_order')) + ' ' + \
            style.SQL_COLTYPE(models.IntegerField().db_type()))
    for field_constraints in opts.unique_together:
        table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
            ", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))

    full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
                      style.SQL_TABLE(qn(opts.db_table)) + ' (']
    for i, line in enumerate(table_output):  # Combine and add commas.
        full_statement.append('    %s%s' % (line, i < len(table_output) - 1 and ',' or ''))
    full_statement.append(')')
    if opts.db_tablespace:
        full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
    full_statement.append(';')
    final_output.append('\n'.join(full_statement))

    if opts.has_auto_field:
        # Add any extra SQL needed to support auto-incrementing primary keys.
        auto_column = opts.auto_field.db_column or opts.auto_field.name
        autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
        if autoinc_sql:
            for stmt in autoinc_sql:
                final_output.append(stmt)

    return final_output, pending_references
def localize(self, dt, is_dst=False):
    '''Convert naive time to local time.

    This method should be used to construct localtimes, rather
    than passing a tzinfo argument to a datetime constructor.

    is_dst is used to determine the correct timezone in the ambiguous
    period at the end of daylight saving time.

    >>> from pytz import timezone
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> amdam = timezone('Europe/Amsterdam')
    >>> dt = datetime(2004, 10, 31, 2, 0, 0)
    >>> loc_dt1 = amdam.localize(dt, is_dst=True)
    >>> loc_dt2 = amdam.localize(dt, is_dst=False)
    >>> loc_dt1.strftime(fmt)
    '2004-10-31 02:00:00 CEST (+0200)'
    >>> loc_dt2.strftime(fmt)
    '2004-10-31 02:00:00 CET (+0100)'
    >>> str(loc_dt2 - loc_dt1)
    '1:00:00'

    Use is_dst=None to raise an AmbiguousTimeError for ambiguous
    times at the end of daylight saving time

    >>> try:
    ...     loc_dt1 = amdam.localize(dt, is_dst=None)
    ... except AmbiguousTimeError:
    ...     print('Ambiguous')
    Ambiguous

    is_dst defaults to False

    >>> amdam.localize(dt) == amdam.localize(dt, False)
    True

    is_dst is also used to determine the correct timezone in the
    wallclock times jumped over at the start of daylight saving time.

    >>> pacific = timezone('US/Pacific')
    >>> dt = datetime(2008, 3, 9, 2, 0, 0)
    >>> ploc_dt1 = pacific.localize(dt, is_dst=True)
    >>> ploc_dt2 = pacific.localize(dt, is_dst=False)
    >>> ploc_dt1.strftime(fmt)
    '2008-03-09 02:00:00 PDT (-0700)'
    >>> ploc_dt2.strftime(fmt)
    '2008-03-09 02:00:00 PST (-0800)'
    >>> str(ploc_dt2 - ploc_dt1)
    '1:00:00'

    Use is_dst=None to raise a NonExistentTimeError for these skipped
    times.

    >>> try:
    ...     loc_dt1 = pacific.localize(dt, is_dst=None)
    ... except NonExistentTimeError:
    ...     print('Non-existent')
    Non-existent
    '''
    if dt.tzinfo is not None:
        raise ValueError('Not naive datetime (tzinfo is already set)')

    # Find the two best possibilities.
    possible_loc_dt = set()
    for delta in [timedelta(days=-1), timedelta(days=1)]:
        loc_dt = dt + delta
        idx = max(0, bisect_right(self._utc_transition_times, loc_dt) - 1)
        inf = self._transition_info[idx]
        tzinfo = self._tzinfos[inf]
        loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
        if loc_dt.replace(tzinfo=None) == dt:
            possible_loc_dt.add(loc_dt)

    if len(possible_loc_dt) == 1:
        return possible_loc_dt.pop()

    # If there are no possibly correct timezones, we are attempting
    # to convert a time that never happened - the time period jumped
    # during the start-of-DST transition period.
    if len(possible_loc_dt) == 0:
        # If we refuse to guess, raise an exception.
        if is_dst is None:
            raise NonExistentTimeError(dt)

        # If we are forcing the pre-DST side of the DST transition, we
        # obtain the correct timezone by winding the clock forward a few
        # hours.
        elif is_dst:
            return self.localize(
                dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)

        # If we are forcing the post-DST side of the DST transition, we
        # obtain the correct timezone by winding the clock back.
        else:
            return self.localize(
                dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)

    # If we get this far, we have multiple possible timezones - this
    # is an ambiguous case occurring during the end-of-DST transition.

    # If told to be strict, raise an exception since we have an
    # ambiguous case
    if is_dst is None:
        raise AmbiguousTimeError(dt)

    # Filter out the possibilities that don't match the requested
    # is_dst
    filtered_possible_loc_dt = [
        p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst
    ]

    # Hopefully we only have one possibility left. Return it.
    if len(filtered_possible_loc_dt) == 1:
        return filtered_possible_loc_dt[0]

    if len(filtered_possible_loc_dt) == 0:
        filtered_possible_loc_dt = list(possible_loc_dt)

    # If we get this far, we have a weird timezone transition
    # where the clocks have been wound back but is_dst is the same
    # in both (eg. Europe/Warsaw 1915 when they switched to CET).
    # At this point, we just have to guess unless we allow more
    # hints to be passed in (such as the UTC offset or abbreviation),
    # but that is just getting silly.
    #
    # Choose the earliest (by UTC) applicable timezone if is_dst=True
    # Choose the latest (by UTC) applicable timezone if is_dst=False
    # i.e., behave like end-of-DST transition dates
    dates = {}  # utc -> local
    for local_dt in filtered_possible_loc_dt:
        utc_time = (local_dt.replace(tzinfo=None) -
                    local_dt.tzinfo._utcoffset)
        assert utc_time not in dates
        dates[utc_time] = local_dt
    return dates[[min, max][not is_dst](dates)]
def _get_permissions(self):
    perms = set()
    for g in self.groups:
        perms = perms | set(g.permissions)
    return perms
def __init__(self, ncpus="autodetect", ppservers=(), secret=None, restart=False, proto=2, socket_timeout=3600): """Creates Server instance ncpus - the number of worker processes to start on the local computer, if parameter is omitted it will be set to the number of processors in the system ppservers - list of active parallel python execution servers to connect with secret - passphrase for network connections, if omitted a default passphrase will be used. It's highly recommended to use a custom passphrase for all network connections. restart - whether to restart worker process after each task completion proto - protocol number for pickle module socket_timeout - socket timeout in seconds which is also the maximum time a remote job could be executed. Increase this value if you have long running jobs or decrease if connectivity to remote ppservers is often lost. With ncpus = 1 all tasks are executed consequently For the best performance either use the default "autodetect" value or set ncpus to the total number of processors in the system """ if not isinstance(ppservers, tuple): raise TypeError("ppservers argument must be a tuple") self.logger = logging.getLogger('pp') self.logger.info("Creating server instance (pp-" + version+")") self.logger.info("Running on Python %s %s", sys.version.split(" ")[0], sys.platform) self.__tid = 0 self.__active_tasks = 0 self.__active_tasks_lock = threading.Lock() self.__queue = [] self.__queue_lock = threading.Lock() self.__workers = [] self.__rworkers = [] self.__rworkers_reserved = [] self.__sourcesHM = {} self.__sfuncHM = {} self.__waittasks = [] self.__waittasks_lock = threading.Lock() self._exiting = False self.__accurate_stats = True self.autopp_list = {} self.__active_rworkers_list_lock = threading.Lock() self.__restart_on_free = restart self.__pickle_proto = proto self.__connect_locks = {} # add local directory and sys.path to PYTHONPATH pythondirs = [os.getcwd()] + sys.path if "PYTHONPATH" in os.environ and os.environ["PYTHONPATH"]: pythondirs += os.environ["PYTHONPATH"].split(os.pathsep) os.environ["PYTHONPATH"] = os.pathsep.join(set(pythondirs)) atexit.register(self.destroy) self.__stats = {"local": _Statistics(0)} self.set_ncpus(ncpus) self.ppservers = [] self.auto_ppservers = [] self.socket_timeout = socket_timeout for ppserver in ppservers: ppserver = ppserver.split(":") host = ppserver[0] if len(ppserver)>1: port = int(ppserver[1]) else: port = ppc.randomport() if host.find("*") == -1: self.ppservers.append((host, port)) else: if host == "*": host = "*.*.*.*" interface = host.replace("*", "0") broadcast = host.replace("*", "255") self.auto_ppservers.append(((interface, port), (broadcast, port))) self.__stats_lock = threading.Lock() if secret is not None: if not isinstance(secret, str): raise TypeError("secret must be of a string type") self.secret = str(secret) elif hasattr(user, "pp_secret"): secret = getattr(user, "pp_secret") if not isinstance(secret, str): raise TypeError("secret must be of a string type") self.secret = str(secret) else: self.secret = Server.default_secret self.__connect() self.__creation_time = time.time() self.logger.info("pp local server started with %d workers" % (self.__ncpus, ))
def testAssertSetEqual(self): self.assertMessages('assertSetEqual', (set(), set([None])), ["None$", "^oops$", "None$", "None : oops$"])
def validate_base(cls, model): opts = model._meta # raw_id_fields if hasattr(cls, 'raw_id_fields'): check_isseq(cls, 'raw_id_fields', cls.raw_id_fields) for idx, field in enumerate(cls.raw_id_fields): f = get_field(cls, model, opts, 'raw_id_fields', field) if not isinstance(f, (models.ForeignKey, models.ManyToManyField)): raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must " "be either a ForeignKey or ManyToManyField." % (cls.__name__, idx, field)) # fields if cls.fields: # default value is None check_isseq(cls, 'fields', cls.fields) for field in cls.fields: check_formfield(cls, model, opts, 'fields', field) if cls.fieldsets: raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__) if len(cls.fields) > len(set(cls.fields)): raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__) # fieldsets if cls.fieldsets: # default value is None check_isseq(cls, 'fieldsets', cls.fieldsets) for idx, fieldset in enumerate(cls.fieldsets): check_isseq(cls, 'fieldsets[%d]' % idx, fieldset) if len(fieldset) != 2: raise ImproperlyConfigured("'%s.fieldsets[%d]' does not " "have exactly two elements." % (cls.__name__, idx)) check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1]) if 'fields' not in fieldset[1]: raise ImproperlyConfigured("'fields' key is required in " "%s.fieldsets[%d][1] field options dict." % (cls.__name__, idx)) flattened_fieldsets = flatten_fieldsets(cls.fieldsets) if len(flattened_fieldsets) > len(set(flattened_fieldsets)): raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__) for field in flattened_fieldsets: check_formfield(cls, model, opts, "fieldsets[%d][1]['fields']" % idx, field) # form if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm): raise ImproperlyConfigured("%s.form does not inherit from " "BaseModelForm." % cls.__name__) # filter_vertical if hasattr(cls, 'filter_vertical'): check_isseq(cls, 'filter_vertical', cls.filter_vertical) for idx, field in enumerate(cls.filter_vertical): f = get_field(cls, model, opts, 'filter_vertical', field) if not isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be " "a ManyToManyField." % (cls.__name__, idx)) # filter_horizontal if hasattr(cls, 'filter_horizontal'): check_isseq(cls, 'filter_horizontal', cls.filter_horizontal) for idx, field in enumerate(cls.filter_horizontal): f = get_field(cls, model, opts, 'filter_horizontal', field) if not isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be " "a ManyToManyField." % (cls.__name__, idx)) # radio_fields if hasattr(cls, 'radio_fields'): check_isdict(cls, 'radio_fields', cls.radio_fields) for field, val in cls.radio_fields.items(): f = get_field(cls, model, opts, 'radio_fields', field) if not (isinstance(f, models.ForeignKey) or f.choices): raise ImproperlyConfigured("'%s.radio_fields['%s']' " "is neither an instance of ForeignKey nor does " "have choices set." % (cls.__name__, field)) if not val in (HORIZONTAL, VERTICAL): raise ImproperlyConfigured("'%s.radio_fields['%s']' " "is neither admin.HORIZONTAL nor admin.VERTICAL." 
% (cls.__name__, field)) # prepopulated_fields if hasattr(cls, 'prepopulated_fields'): check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields) for field, val in cls.prepopulated_fields.items(): f = get_field(cls, model, opts, 'prepopulated_fields', field) if isinstance(f, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)): raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' " "is either a DateTimeField, ForeignKey or " "ManyToManyField. This isn't allowed." % (cls.__name__, field)) check_isseq(cls, "prepopulated_fields['%s']" % field, val) for idx, f in enumerate(val): get_field(cls, model, opts, "prepopulated_fields['%s'][%d]" % (field, idx), f)
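# Sketch only: a hypothetical ModelAdmin that validate_base() above would
# reject; the Book/Author models and field names are illustrative and not
# part of the original code.
from django.contrib import admin
from django.db import models

class Author(models.Model):
    name = models.CharField(max_length=100)

class Book(models.Model):
    title = models.CharField(max_length=100)
    author = models.ForeignKey(Author)

class BookAdmin(admin.ModelAdmin):
    raw_id_fields = ('title',)     # flagged: 'title' is neither a ForeignKey nor a ManyToManyField
    fields = ('title', 'title')    # flagged: duplicate entries in 'fields'

# Validating/registering BookAdmin raises ImproperlyConfigured with the
# messages constructed in validate_base() above.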
def get_uselib_vars(self): _vars = set([]) for x in self.features: if x in USELIB_VARS: _vars |= USELIB_VARS[x] return _vars
Copyright (c) 2007-2008 Roc Zhou
"""

__author__ = "Roc Zhou <*****@*****.**>"
__date__ = "23 May 2008"
__version__ = "0.3"
__license__ = "GPL v2.0"

import re
from gettext import gettext as _
from copy import copy, deepcopy
import time

try:
    set([])  # built-in set, for Python 2.5 and later
except NameError:
    # for Python 2.3/2.4 compatibility
    from sets import Set as set

class RawTree:
    pass

# Only root['trunk']['branch'] style is used here,
# because Tree may conflict with many reserved names of Python such as:
# from, import, as, class, def, return, in, and, or, if, for, while, pass, try, except, finally, ...
# __setitem__ should be overloaded
# __setattr__ should raise TypeError or AttributeError
# Of course, you can still make use of Tree's items functionality:
# root.trunk['try'] = $value  # ^_^
class TreeExc:
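# Sketch only: a toy class illustrating the attribute-vs-item access
# trade-off described in the comments above; it is not the Tree/RawTree
# implementation itself.
class ToyTree(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

root = ToyTree()
root['trunk'] = ToyTree()
root['trunk']['branch'] = 1
print(root.trunk.branch)      # attribute style works for ordinary names
root['trunk']['try'] = 2      # 'try' is a keyword, so item style is required
# root.trunk.try              # would be a SyntaxError if uncommented
print(root['trunk']['try'])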
#! /usr/bin/env python # encoding: utf-8 # WARNING! All changes made to this file will be lost! import sys if sys.hexversion < 0x020400f0: from sets import Set as set import os, sys, re from waflib import TaskGen, Task, Utils, Logs, Build, Options, Node, Errors from waflib.Logs import error, debug, warn from waflib.TaskGen import after, before, feature, taskgen_method from waflib.Tools import c_aliases, c_preproc, c_config, c_asm, c_osx, c_tests from waflib.Configure import conf USELIB_VARS = Utils.defaultdict(set) USELIB_VARS['c'] = set( ['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CCDEPS', 'CFLAGS']) USELIB_VARS['cxx'] = set( ['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CXXDEPS', 'CXXFLAGS']) USELIB_VARS['d'] = set(['INCLUDES', 'DFLAGS']) USELIB_VARS['cprogram'] = USELIB_VARS['cxxprogram'] = set([ 'LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH' ]) USELIB_VARS['cshlib'] = USELIB_VARS['cxxshlib'] = set([ 'LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH' ]) USELIB_VARS['cstlib'] = USELIB_VARS['cxxstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['dprogram'] = set( ['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dshlib'] = set( ['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS'])
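# Sketch only: mirrors the set union performed by get_uselib_vars() above,
# without a real waf build context; the feature list is illustrative.
features = ['cxx', 'cxxshlib']

_vars = set()
for x in features:
    if x in USELIB_VARS:
        _vars |= USELIB_VARS[x]

# For a C++ shared library this is the union of the 'cxx' and 'cxxshlib'
# rows above: INCLUDES, CXXFLAGS, LIB, LINKFLAGS, RPATH, FRAMEWORK, ...
print(sorted(_vars))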
def _loadRepoFile(filename):
    """
    Loads each repository file information.
    """
    # Open the file up front so that a missing or unreadable file raises
    # IOError here; ConfigParser.read() below would silently skip it.
    file = open(filename)

    # The computed aliases we have seen in the given file
    seen = set()

    repofile = ConfigParser.ConfigParser()
    repofile.read(filename)

    for repo in repofile.sections():
        # Iterate through each repo found in the file
        alias = "yumsync-%s" % repo
        name = _replaceStrings(repofile.get(repo, 'name'))
        baseurl = None
        mirrorlist = None

        # Some repos have baseurl, some have mirrorlist
        if repofile.has_option(repo, 'baseurl'):
            baseurl = _replaceStrings(repofile.get(repo, 'baseurl'))
            if baseurl.find("\n") >= 0:
                baseurl = baseurl.splitlines()[1]
            if baseurl == "file:///media/cdrom/":
                baseurl = "localmedia://"
            if baseurl == "file:///media/cdrecorder/":
                baseurl = "localmedia://"
        else:
            # baseurl is required for rpm-md channels
            baseurl = _searchComments(filename, repo)
        if repofile.has_option(repo, 'mirrorlist'):
            mirrorlist = _replaceStrings(repofile.get(repo, 'mirrorlist'))
            if not baseurl:
                baseurl = _findBaseUrl(mirrorlist, repo)

        if baseurl is None and mirrorlist is None:
            iface.warning(_("Yum channel %s does not contain baseurl or "
                            "mirrorlist addresses. Not syncing.") % repo)
            return seen

        # The .repo file stores "enabled"; the channel data stores the
        # inverse under "disabled".
        if repofile.has_option(repo, 'enabled'):
            disabled = not repofile.getboolean(repo, 'enabled')
        else:
            disabled = False

        data = {"type": "rpm-md",
                "name": name,
                "baseurl": baseurl,
                "disabled": disabled}
        if mirrorlist:
            data["mirrorlist"] = mirrorlist
        seen.add(alias)
        try:
            createChannel(alias, data)
        except Error, e:
            iface.error(_("While using %s: %s") % (filename, e))
        else:
            # Store it persistently.
            sysconf.set(("channels", alias), data)
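# Sketch only: shows the shape of the .repo input _loadRepoFile() consumes
# and the channel dict it builds; the repo id, name and URL are made up,
# and the $releasever/$basearch substitution done by _replaceStrings() is
# skipped here.
import ConfigParser
from StringIO import StringIO

sample = """\
[example]
name=Example repo
baseurl=http://mirror.example.org/releases/38/x86_64/
enabled=1
"""

repofile = ConfigParser.ConfigParser()
repofile.readfp(StringIO(sample))

repo = repofile.sections()[0]
data = {"type": "rpm-md",
        "name": repofile.get(repo, 'name'),
        "baseurl": repofile.get(repo, 'baseurl'),
        "disabled": not repofile.getboolean(repo, 'enabled')}
print "yumsync-%s" % repo, data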