def process_and_get_line_number(s):
    """Write the simplex ``s`` (an ImmutableSet of vertices) to the output
    file, recursively emitting its faces first, and return the line number
    it was written on.

    Relies on module-level state: ``line_map`` (simplex -> line number),
    ``dimension_cutoff``, ``output_file`` and ``Context.line_number``.

    NOTE(review): when ``dimension > dimension_cutoff`` this branch only
    recurses on the cutoff-sized subsets and implicitly returns None —
    presumably callers never use the line number of an over-cutoff
    simplex; confirm against call sites.
    """
    #nonlocal line_number
    if s in line_map:
        # Already emitted: reuse the recorded line number.
        return line_map[s]
    else:
        dimension = len(s) - 1
        if dimension > dimension_cutoff:
            # Too large to emit directly: emit all faces of the maximum
            # allowed dimension instead.
            for subset in itertools.combinations(
                    s, dimension_cutoff + 1
            ):  # Take all subsets of size dimension_cutoff + 1
                process_and_get_line_number(ImmutableSet(subset))
        elif dimension > 0:
            # Emit every facet (s minus one element) first, so their line
            # numbers can be referenced on this simplex's output line.
            subsets_line_numbers = []
            for e in s:
                subsets_line_numbers.append(
                    process_and_get_line_number(ImmutableSet(s - Set([e]))))
            output_file.write("\n" + str(dimension))
            for l in subsets_line_numbers:
                output_file.write(" " + str(l))
            line_map[s] = Context.line_number
            Context.line_number += 1
            return Context.line_number - 1
        else:
            # 0-dimensional simplices (single points) must have been written
            # before the recursion started.
            raise Exception(
                "Should have already added single point for base case: " +
                str(s))
def _loadFileList(self, fname):
    """Return the lines of ``fname`` (relative to ``self.path``) as an
    ImmutableSet, or an empty ImmutableSet if the file does not exist.
    """
    fullpath = os.path.join(self.path, fname)
    if os.path.exists(fullpath):
        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open(fullpath, 'r') as f:
            entries = f.read().split('\n')
        return ImmutableSet(entries)
    else:
        return ImmutableSet()
def sumCatalogResultByWorklist(grouped_worklist_dict, catalog_result):
    """
    Return a dict regrouping each worklist's result, extracting it from
    catalog result.
    Build a dictionary summing up which value combination interests which
    worklist, then iterate catalog result lines and give results to
    corresponding worklists.
    It is better to avoid reading multiple times the catalog result from
    flexibility point of view: if it must ever be changed into a cursor,
    this code will keep working nicely without needing to rewind the
    cursor.
    This code assumes that all worklists have the same set of criterion
    ids, and that when a criterion id is associated with an ExclusionList
    it is also true for all worklists.
    """
    worklist_result_dict = {}
    if len(catalog_result) > 0:
        # Transtype all worklist definitions where needed.
        # NOTE(review): this mutates the criterion dicts inside
        # ``grouped_worklist_dict`` in place, replacing value lists by
        # ImmutableSets of SQL-cast values.
        criterion_id_list = []
        class_dict = {
            name: _sql_cast_dict.get(x['type'], _sql_cast_fallback)
            for name, x in catalog_result.data_dictionary().iteritems()
        }
        for criterion_dict in grouped_worklist_dict.itervalues():
            for criterion_id, criterion_value_list in criterion_dict.iteritems(
            ):
                if type(criterion_value_list) is not ExclusionList:
                    criterion_id_list.append(criterion_id)
                    expected_class = class_dict[criterion_id]
                    # Cast values to the column's SQL type if needed, and
                    # freeze them into an ImmutableSet for O(1) membership
                    # tests below.
                    if type(criterion_value_list[0]) is not expected_class:
                        criterion_dict[criterion_id] = ImmutableSet(
                            [expected_class(x) for x in criterion_value_list])
                    elif type(criterion_value_list) is not ImmutableSet:
                        criterion_dict[criterion_id] = ImmutableSet(
                            criterion_dict[criterion_id])
        # Read catalog result and distribute to matching worklists
        for result_line in catalog_result:
            result_count = int(result_line[COUNT_COLUMN_TITLE])
            for worklist_id, criterion_dict in grouped_worklist_dict.iteritems(
            ):
                # A line belongs to a worklist when every criterion value
                # matches; bail out at the first mismatch.
                is_candidate = True
                for criterion_id in criterion_id_list:
                    criterion_value_set = criterion_dict[criterion_id]
                    if result_line[criterion_id] not in criterion_value_set:
                        is_candidate = False
                        break
                if is_candidate:
                    # Accumulate counts per worklist (EAFP on first hit).
                    try:
                        worklist_result_dict[worklist_id] += result_count
                    except KeyError:
                        worklist_result_dict[worklist_id] = result_count
    return worklist_result_dict
def setUp(self):
    """Build the fixture nodes and the answer sets expected from them."""
    self.dedekindNode = DedekindNode(
        6, [63, 62, 47, 31, 55, 46, 30, 15, 23, 14])
    # Expected answers for the node above: configurations 14 and 23.
    self.answerSet = set(
        ImmutableSet(getConfAsSet(conf, 6)) for conf in (14, 23))
    self.fullNode = getFullNode(6)
    self.fullAnswerSet = set([ImmutableSet()])
    self.emptyNode = DedekindNode(6, [])
    self.emptyAnswerSet = set()
def computeAllJust(setDescription, artSet, justSet, curpath, allpaths):
    '''
    Implementation of Hitting Set Tree found directly from EulerX.
    A few modifications are made to ensure that it is compatible with this
    library's implementation of logarithmic Extraction, otherwise
    everything else is the same
    '''
    # If a previously closed path is a subset of the current one, every
    # justification reachable from here has already been found.
    for path in allpaths:
        if path.issubset(curpath):
            return
    #must be 'not' to be consistent with this library's implementation.
    #Without it, it does not compute the MIS properly
    #i.e. it does not pass any of the algorithm tests.
    if not setDescription.isConsistent(artSet):
        allpaths.add(curpath)
        return
    # Reuse an already-computed justification disjoint from the current
    # path, if any; otherwise extract a fresh one.
    j = sets.Set()
    for s in justSet:
        if len(s.intersection(curpath)) == 0:
            j = s
    if len(j) == 0:
        j = computeSingleMIS(setDescription, artSet)
        if len(j) != 0:
            justSet.add(j)
    # Branch: remove each element of the justification in turn and recurse.
    for a in j:
        tmpcur = curpath.union(set(a))
        tmpart = artSet - ImmutableSet(a)
        computeAllJust(setDescription, tmpart, justSet, tmpcur, allpaths)
def __init__(self, landmark_list, birth_time, keep_sorted_list):
    """Store the landmarks as an immutable set, optionally keeping a
    sorted list copy, and record the birth time."""
    self.sll = sorted(landmark_list) if keep_sorted_list else None
    self.landmark_set = ImmutableSet(landmark_list)
    self.birth_time = birth_time
def reduce(self, tokens=1):
    """
    Indicates whether the specified number of tokens can be used and, if
    so, removes them from all of the buckets.

    The check is applied to every bucket, so a single bucket running out
    of tokens means the tokens cannot be used.

    Parameters
    ----------
    tokens : int
        Number of tokens to remove from each bucket.

    Returns
    -------
    bool
        Whether the requested number of tokens could be used.
    """
    # Record which buckets cannot cover the request.
    depleted = []
    for key, bucket in self._buckets.iteritems():
        if bucket.get() < tokens:
            depleted.append(key)
    self._exhausted = ImmutableSet(depleted)
    if len(self._exhausted) > 0:
        # One or more rate limits has been reached.
        return False
    # Within limits everywhere: consume the tokens from every bucket.
    for bucket in self._buckets.values():
        bucket.reduce(tokens)
    return True
def do_get_property(self, pspec):
    """GObject property getter for the "windows" and "toplevel" props."""
    prop = pspec.name
    if prop == "windows":
        return ImmutableSet(self._windows.itervalues())
    if prop == "toplevel":
        return self._world_window
    assert False
def __init__(self, condition, relvars, outers=(), columns=()):
    # Build a join over ``relvars``: flatten their inner/outer relvars,
    # AND together their conditions, and freeze everything into a
    # hashable key used for equality and hashing.
    myrels = []
    relUsage = {}

    def checkUsage(rv):
        # Each relvar object may appear only once in the whole join.
        r = id(rv)
        if r in relUsage:
            raise ValueError("Relvar used more than once", rv)
        else:
            relUsage[r] = True
        return rv

    # NOTE: Python 2 ``map`` returns a list, so ``outers`` can be
    # extended below.
    outers = map(checkUsage, outers)
    for rv in relvars:
        myrels.extend(map(checkUsage, rv.getInnerRVs()))
        outers.extend(map(checkUsage, rv.getOuterRVs()))
        condition = condition & rv.getCondition()
    if len(myrels) < 1:
        raise TypeError("BasicJoin requires at least 1 relvar")
    # Sort for a canonical ordering so equal joins compare/hash equal.
    myrels.sort()
    outers.sort()
    self.relvars = tuple(myrels)
    self.outers = tuple(outers)
    self.condition = condition
    self.columns = Graph(columns)
    self._hashAndCompare = (self.__class__.__name__, condition,
                            self.relvars, self.outers,
                            ImmutableSet(self.columns))
def Update(self, sensor, solve=True):
    """
    Update belief about the world, given a sensor (with associated
    parameters, including position and orientation). For all voxels in
    range, create a rate update which is uniform and sums to the
    measurement value, then compute a weighted average at each point,
    trusting lower values more than higher ones.

    Returns True when the belief was updated, False when the measurement
    exceeded ``self.k_`` (too many sources) and was discarded.
    """
    measurement = sensor.Sense()
    if measurement > self.k_:
        print "Measured too many sources. Did not update."
        return False
    # Identify voxels that are in range. Store row-major flattened indices.
    in_view_list = []
    for ii in range(self.belief_.shape[0]):
        for jj in range(self.belief_.shape[1]):
            if sensor.VoxelInView(ii, jj):
                in_view_list.append(ii * self.belief_.shape[1] + jj)
    # Set up constrained least squares problem and solve.
    # The frozen set copy allows fast membership tests / deduplication
    # alongside the ordered list.
    self.viewed_lists_.append(in_view_list)
    self.viewed_sets_.append(ImmutableSet(in_view_list))
    self.measurements_.append(measurement)
    if solve:
        self.SolveLeastSquares()
    return True
def __init__(self, vars):
    """Create a new cluster.

    keyword args:
        vars - list of variables
    """
    self.overconstrained = False
    self.vars = ImmutableSet(vars)
def mincut(self): """Returns a minimum cut of the graph. Implements the Stoer/Wagner algorithm. The graph is interpreted as a undirected graph, by adding the weights of co-edges. Returns (value, edges, g1, g2) where value is the weight of the cut, edges is the set of cut edges, g1 and g2 are disjoint sets of vertices. """ # create graph of one-clusters graph = Graph() for edge in self.edges(): (v1,v2) = edge g1 = ImmutableSet([v1]) g2 = ImmutableSet([v2]) graph.add_edge(g1,g2) # Stoer/Wagner algorithm mincutvalue = None mincut = ImmutableSet() while len(graph.vertices()) > 1: (phasecut,phasecutvalue) = self._mincutphase(graph) if mincutvalue == None or phasecutvalue < mincutvalue: mincutvalue = phasecutvalue mincut = phasecut # rewrite output g1 = mincut g2 = ImmutableSet(self.vertices()).difference(g1) edges = Set() for v in g1: for k in self.adjacent_vertices(v): if k in g2: if self.has_edge(v,k): edges.add((v,k)) if self.has_edge(k,v): edges.add((k,v)) for v in g2: for k in self.adjacent_vertices(v): if k in g1: if self.has_edge(v,k): edges.add((v,k)) if self.has_edge(k,v): edges.add((k,v)) return (mincutvalue, ImmutableSet(edges), g1, g2)
def add(self, pattern, endpoint, **kw):
    """Register ``pattern`` -> ``endpoint`` and rebuild the combined
    selector regex.

    Raises Exception if ``pattern`` was already registered.
    """
    # ``in`` replaces the deprecated dict.has_key().
    if pattern in self._patterns:
        raise Exception('duplicate pattern', pattern)
    # Key endpoints by (endpoint, frozen variable names) so the same
    # endpoint can be registered with different variable sets.
    self._endpoints[(endpoint, ImmutableSet(
        make_variables(pattern)))] = make_url_for(pattern)
    self._patterns[pattern] = (re.compile(make_pattern(pattern)), endpoint,
                               kw)
    # Recompile the selector over all registered patterns.
    self._pattern_selector = re.compile(
        make_selector(self._patterns.iterkeys()))
def evalRiskCycleExit(cycNode, nodeSet):
    # Evaluate the joint value for ``nodeSet`` exiting the cycle
    # represented by ``cycNode``, memoising the answer in the module-level
    # ``phi`` table.  Relies on module state: cycleData, orNodes, irv, phi.
    # print "--- evalRiskCycleExit --- "
    # print "cycNode = " + str(cycNode)
    # print "nodeSet: " + str(nodeSet)
    cycleSet, entryNodes, paths = cycleData[cycNode]
    # print "cycleSet = " + str(cycleSet)
    # print "entryNodes = " + str(entryNodes)
    # print "paths = " + str(paths)
    # print "nodeSet = " + str(nodeSet)
    # print "* PATHS *"
    # for pk in paths: print str(pk) + ": " + str(paths[pk])
    # print "* NODESET -- " + str(nodeSet) + " *"
    # Gather, per node, the paths leading to it (``paths`` is keyed by
    # absolute node id; nodeSet members may be negated).
    nodePaths = {}
    allPaths = []
    for n in nodeSet:
        nodePaths[n] = paths[abs(n)]
        allPaths.extend(paths[abs(n)])
    # print "relevant paths = " + str(allPaths)
    # Find d-separating set over paths
    seenSet = Set([])
    dSet = Set([])
    for pp in allPaths:  # for all paths
        # Add nodes seen multiple times
        dSet.update(pp & seenSet)
        # Track all AND-nodes appearing in paths to m
        seenSet.update(pp.difference(orNodes))
    qSet = entryNodes & seenSet  # Get all entry nodes leading to m
    dSet.difference_update(qSet)  # Remove entry nodes from d-separating set
    # Remove d-separating nodes with probability 1
    for d in dSet.copy():
        if irv[d] == 1:
            dSet.remove(d)
    # print "d-separating set = " + str(dSet)
    # print "relevant entry nodes = " + str(qSet)
    qSumSet = getSummationValues(qSet.copy())
    # print qSumSet
    answer = evalCycleNodeset(dSet.copy(), 1, Set([]), qSumSet, nodeSet,
                              nodePaths)
    # Cache under an immutable (hashable) copy of the node set.
    phi[ImmutableSet(nodeSet)] = answer
    # print "phi of " + str(nodeSet) + " is " + str(answer)
    # print "\nQuitting..."
    # sys.exit(0)
    return answer
class Analex:
    #############################################################################
    ## Set of reserved words, used to check whether an identifier is a keyword
    #############################################################################
    PR = ImmutableSet([
        "PROGRAMA", "VAR", "VECTOR", "DE", "ENTERO", "REAL", "BOOLEANO",
        "PROC", "FUNCION", "INICIO", "FIN", "SI", "ENTONCES", "SINO",
        "MIENTRAS", "HACER", "LEE", "ESCRIBE", "Y", "O", "NO", "CIERTO",
        "FALSO"
    ])

    ############################################################################
    #
    # Function: __init__
    # Task: class constructor
    # Parameters: flujo: input character stream
    # Returns: --
    #
    ############################################################################
    def __init__(self):
        # To be completed with whatever class fields are deemed necessary
        self.nlinea = 1  # line counter, used to report errors

    ############################################################################
    #
    # Function: Analiza
    # Task: identifies the different lexical components
    # Parameters: --
    # Returns: a lexical component
    #
    ############################################################################
    def Analiza(self):
        # NOTE(review): this method is an unfinished teaching skeleton from
        # the original source — several branches are placeholders (bare
        # ``elif`` and pseudo-code conditions) and it does not compile
        # as-is; the structure below is preserved verbatim.
        ch = leerCaracter
        if ch == " ":
            # skip all blank characters
            # and look for the next lexical component, which will be returned
            )
        elif ch == "+":
            # an object of class OpAdd must be created and returned
        elif
            # and so on with every symbol and operator of the language
            return componentes.CorCi()
        elif ch == "{":
            # Skip every character of the comment
            # and find the next lexical component
        elif ch == "}":
            # we have an unopened comment
            print "ERROR: Comentario no abierto"
            return self.Analiza()
        elif ch == ":":
            # Check the next character to decide between a declaration
            # definition and the assignment operator
        elif
            # Complete the missing operators and lexical categories
        elif ch es un caracter
            # read input until it is no longer a valid identifier character
            # push the last character back onto the input
            # Check whether it is an identifier or a keyword and return the
            # corresponding object
        elif ch es numero:
            # Read every element forming the number
            # push the last character, which no longer belongs to the
            # number, back onto the input
            # Return an object of the corresponding category
        elif ch == "\n":
def __init__(self, variables):
    """Create a new balloon.

    keyword args:
        vars - collection of PointVar's
    """
    if len(variables) < 3:
        raise StandardError("balloon must have at least three variables")
    self.overconstrained = False
    self.vars = ImmutableSet(variables)
def getConfAsSet(self, inputConf):
    """Decode the bitmask ``inputConf`` into the ImmutableSet of elements
    whose bits are set, using ``self.bitToElement`` as the mapping."""
    from sets import ImmutableSet
    elements = set()
    # Bits 0 .. inputSize inclusive, as in the original while-loop.
    for shift in range(self.inputSize + 1):
        bit = 1 << shift
        if bit in self.bitToElement and (bit & inputConf):
            elements.add(self.bitToElement[bit])
    return ImmutableSet(elements)
def test_constructor2(self):
    """A Set can hold an ImmutableSet; add/remove/discard behave sanely."""
    frozen = ImmutableSet([1])
    container = Set([frozen])
    member = list(container).pop()
    container.remove(frozen)
    self.assertEqual(type(member), ImmutableSet)
    # Rebuild the set of sets with .add, then confirm removal again.
    container.add(frozen)
    container.remove(frozen)
    self.assertEqual(container, Set())  # Verify that remove worked
    container.discard(frozen)  # Absence of KeyError indicates working fine
def __init__(self, vocab, nlp):
    """Initialise feature stores and the accepted POS-tag whitelist
    (adjectives, adverbs and verb forms)."""
    self.vocab = vocab
    self.features = {}
    self.chunks = defaultdict(int)
    tag_names = ('JJ', 'VB', 'RB', 'RBR', 'JJR', 'JJS', 'RBS', 'VBN',
                 'VBD', 'VBP')
    self.AcceptedPOSTags = ImmutableSet(
        [nlp.vocab.strings[tag] for tag in tag_names])
def generatePowerset(theSet):
    '''
    Generates the powerset of a given set as a set of ImmutableSets.
    Original code found at
    http://stackoverflow.com/questions/18826571/python-powerset-of-a-given-set-with-generators
    '''
    from itertools import chain, combinations
    all_subsets = chain.from_iterable(
        combinations(theSet, size) for size in range(len(theSet) + 1))
    return set(ImmutableSet(subset) for subset in all_subsets)
def compatibleWithBase(self, base):
    """Compare this schema's messages against ``base``'s and fold the
    per-message compatibility verdicts into a single result."""
    all_names = (ImmutableSet(self.messages.keys())
                 | ImmutableSet(base.messages.keys()))
    compatibility = Same()
    for name in all_names:
        mine = self.messages.get(name, None)
        theirs = base.messages.get(name, None)
        if not mine:
            verdict = Older(
                "missing message %s, did you mean to deprecate?" % name)
        elif not theirs:
            verdict = Newer("added message %s" % name)
        else:
            verdict = mine.compatibleWithBase(theirs)
            verdict.prefix("in message %s: " % name)
        compatibility = compatibility.combine(verdict)
    return compatibility
def __init__(self, **kwargs):
    """
    Initializes the rate limiter with the provided buckets.

    Parameters
    ----------
    **kwargs : Bucket
        Each named parameter's key is the bucket's name and the value is
        the Bucket itself.
    """
    self._exhausted = ImmutableSet()
    self._buckets = kwargs
def __init__(self, verts, fid):
    """Create a triangular face over ``verts``, register it with each
    vertex under its hashable key, and initialise derived data.

    Parameters:
        verts - sequence of (at least) three vertex objects
        fid   - face identifier
    """
    self.verts = verts
    # The original also built an ImmutableSet of the first three vertex
    # ids here, but never used it — vertices key faces by
    # getHashableSet() instead; the dead local is removed.
    for vert in self.verts:
        vert.faces[self.getHashableSet()] = self
    self.id = fid
    self.edges = []
    self.RefactorArea()
    self.dead = False
def __init__(self, cvar, xvars):
    """Create a new hedgehog

    keyword args:
        cvar - center variable
        xvars - list of variables
    """
    self.cvar = cvar
    # The guard admits two or more x-variables, but the original error
    # message claimed "at least three"; the message now matches the
    # actual check (compare the balloon constructor, which checks < 3).
    if len(xvars) < 2:
        raise StandardError("hedgehog must have at least two variables")
    self.xvars = ImmutableSet(xvars)
    self.vars = self.xvars.union([self.cvar])
    self.overconstrained = False
def testOptionsAndHashing(self):
    """Extras are case-normalized, so equivalent requirements compare and
    hash equal regardless of extras casing and version spelling."""
    r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
    r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
    r3 = Requirement.parse("Twisted[BAR,FOO]>=1.2.0")
    for equivalent in (r2, r3):
        self.assertEqual(r1, equivalent)
    self.assertEqual(r1.extras, ("foo", "bar"))
    self.assertEqual(r2.extras, ("bar", "foo"))  # extras are normalized
    self.assertEqual(hash(r1), hash(r2))
    expected = hash(("twisted", ((">=", parse_version("1.2")),),
                     ImmutableSet(["foo", "bar"])))
    self.assertEqual(hash(r1), expected)
def __init__(self, expr):
    """Compile ``expr`` into an NFA: expectation and successor tables,
    the alphabet, and the initial/final state sets."""
    self.expr = expr
    self.pos = 0
    self.nstates = 0
    self.expect = {}
    self.successor = {}
    self.alphabet = Set()
    self.initial, penultimate, epsilon = self.expression()
    # Add a single accepting state, reachable from every penultimate one.
    accept = self.newstate(None)
    for state in penultimate:
        self.successor[state].add(accept)
    final_states = ImmutableSet([accept])
    if epsilon:
        # The expression matches the empty string, so the initial states
        # accept as well.
        final_states = final_states | self.initial
    self.final = final_states
def computeAllMHSHelper(setDescription, constraints, misSet, currPath, paths):
    """Hitting-set-tree recursion collecting every Minimal Inconsistent
    Subset (MIS) into ``misSet``.

    paths holds all previously visited paths of the hitting set tree.
    currPath is the current one; if any previous path is a subset of this
    one, then we have already computed all MIS that would be found in the
    current path's subtree.
    """
    # BUG FIX: the original tested ``path in currPath`` (element
    # membership), although the stated intent — and the sibling
    # computeAllJust implementation — require a subset test.
    for path in paths:
        if path.issubset(currPath):
            return
    # If the current set of constraints is consistent there cannot be any
    # more MIS in this subtree, so record the path and return.
    if setDescription.isConsistent(constraints):
        paths.add(currPath)
        return
    # To avoid redundant MIS computations, reuse an already-computed MIS
    # that shares no element with the current path, if one exists.
    currentMIS = ImmutableSet()
    for mis in misSet:
        if len(mis.intersection(currPath)) == 0:
            currentMIS = mis
            break
    # No reusable MIS matched: compute a new one and remember it.
    if currentMIS == ImmutableSet():
        currentMIS = computeSingleMIS(setDescription, constraints)
        misSet.add(currentMIS)
    # Iterate through the children of the current path.
    for element in currentMIS:
        childPath = currPath.union(set(element))
        computeAllMHSHelper(setDescription,
                            constraints - ImmutableSet(element), misSet,
                            childPath, paths)
def computeAllMIS(setDescription, constraints):
    '''
    Taken from 'A Hybrid Diagnosis Approach Combining Black-Box and
    White-Box Reasoning'.  This attempts to find all Minimal Inconsistent
    Subsets of Constraints.
    @param setDescription- A set of rules linking several items together.
        Think of this as a boolean equation in Conjunctive Normal Form.
    @param Constraints- a set of items we would like to include.  Think of
        this as an assignment for the previous boolean equation.
    '''
    misSet = set()
    LogarithmicExtraction.newRun = True
    # Start the hitting-set tree from an empty path with no visited paths.
    computeAllMHSHelper(setDescription, constraints, misSet,
                        ImmutableSet(), set())
    return misSet
def computeSingleMIS(setDescription, constraints):
    '''
    Taken from 'A Hybrid Diagnosis Approach Combining Black-Box and
    White-Box Reasoning'.  This attempts to find the Minimal Subset of
    Constraints that will be inconsistent for the given Set Description.
    @param setDescription- A set of rules linking several items together.
        Think of this as boolean equation in Conjunctive Normal Form.
    @param Constraints- a set of items we would like to include.  Think of
        this as a value assignment for the previous boolean equation.
    '''
    potentialMIS = computeSingleMISHelper(setDescription, ImmutableSet(),
                                          constraints)
    #The Euler Implementation does not correctly compute the MIS for a set
    #description where everything is always inconsistent (an empty set is
    #inconsistent).  This makes sense, but this library also considers this
    #set description, so we must check the empty configuration here.
    # NOTE: ``newRun`` is module-global state reset on every top-level run;
    # the empty-set check is only meaningful on the first extraction.
    global newRun
    if newRun == True and len(potentialMIS) == 1 \
            and not setDescription.isConsistent(ImmutableSet()):
        newRun = False
        return ImmutableSet()
    else:
        newRun = False
        return potentialMIS
def __init__(self, v1, v2):
    """Create an edge between ``v1`` and ``v2`` and register it on both
    vertices under their frozen id-pair key."""
    self.v1, self.v2 = v1, v2
    key = ImmutableSet((v1.id, v2.id))
    v1.edges[key] = self
    v2.edges[key] = self
    # Faces common to both v1 and v2 (filled in elsewhere).
    self.faces = []
    self.collapsed_faces = {}
    self.RefactorLength()
    self.dead = False