def get_doc_phrase_freq(self, phrase, field, slop, ordered):
    """
    Returns collection frequency for a given phrase and field.
    :param phrase: str
    :param field: field name
    :param slop: number of terms in between
    :param ordered: If true, term occurrences should be ordered
    :return: dictionary {doc: freq, ...}
    """
    # Build the SpanNear query from the whitespace-split phrase terms.
    span_near_query = self.get_span_query(phrase.split(" "), field,
                                          slop=slop, ordered=ordered)

    self.open_searcher()
    index_reader_context = self.searcher.getTopReaderContext()

    # Collect a TermContext for every term the query touches.
    term_contexts = HashMap()
    terms = TreeSet()
    span_near_query.extractTerms(terms)
    for term in terms:
        term_contexts.put(term, TermContext.build(index_reader_context, term))

    doc_phrase_freq = {}
    # Walk each atomic (leaf) reader and count span matches per document.
    for atomic_reader_context in index_reader_context.leaves():
        live_docs = atomic_reader_context.reader().getLiveDocs()
        spans = span_near_query.getSpans(atomic_reader_context, live_docs,
                                         term_contexts)
        while spans.next():
            lucene_doc_id = spans.doc()
            doc_id = atomic_reader_context.reader() \
                .document(lucene_doc_id).get(self.FIELDNAME_ID)
            doc_phrase_freq[doc_id] = doc_phrase_freq.get(doc_id, 0) + 1
    return doc_phrase_freq
def __init__(self): ''' Constructor ''' s = TreeSet() s.add('hello') s.add('world') print s
def __init__(self, path, src, runs):
    # NOTE(review): `path` is accepted but never used in this body —
    # confirm whether callers rely on passing it.
    self.tags = TreeSet()
    self.buffer_ = []
    self.sourceOffset = 0
    self.source = src
    # Register a start tag and an end tag for every style run.
    for run in runs:
        self.tags.add(StyleApplier.StartTag(run, self))
        self.tags.add(StyleApplier.EndTag(run, self))
def testJava(): print "Hello world!" foo = TreeSet() hello = 'hello' foo.add(hello) foo.add("world") print foo print CompareUtils.compare(3, 4) print "Goodbye world!" print "sys.path = " + str(sys.path)
def wrap(cls, topLevelReaderContext, query):
    # Build a TermContext for every term extracted from the query.
    termContexts = HashMap()
    terms = TreeSet()
    query.extractTerms(terms)
    for term in terms:
        termContexts.put(term, TermContext.build(topLevelReaderContext, term))
    leaves = topLevelReaderContext.leaves()
    # Single-segment index: no wrapper needed, return the raw spans.
    if leaves.size() == 1:
        only = leaves.get(0)
        return query.getSpans(only, only.reader().getLiveDocs(), termContexts)
    return MultiSpansWrapper(leaves, query, termContexts)
def parseMappingString(mappingString):
    """Decode a hex mapping string into a sorted list of bit positions.

    Each SYMBOLS_PER_BYTE hex characters form one byte; set bits are
    recorded as 1-based positions (highest position for the LSB of each
    byte, matching the original numbering).
    """
    bitPositions = TreeSet()
    byteCount = len(mappingString) // SYMBOLS_PER_BYTE
    for byteIndex in range(byteCount):
        start = byteIndex * SYMBOLS_PER_BYTE
        byteValue = Integer.parseInt(
            mappingString[start:start + SYMBOLS_PER_BYTE], 16)
        # Scan bits LSB-first, shifting the byte right each step.
        for bit in range(BITS_IN_SYMBOL):
            if byteValue & 1:
                bitPositions.add(
                    byteIndex * BITS_IN_SYMBOL + BITS_IN_SYMBOL - bit)
            byteValue = byteValue >> 1
    return list(ArrayList(bitPositions))
def saveModels(mol, files, type):
    """Write the molecule's models as one CIF file or per-model PDB files."""
    active = mol.getActiveStructures()
    if type == 'cif':
        mol.resetActiveStructures()
        if 'final' not in files[-1]:
            # don't write out reference structure if in file list
            treeSet = TreeSet([i for i in range(len(files) - 1)])
            mol.setActiveStructures(treeSet)
        molName = mol.getName()
        cifFile = os.path.join(os.getcwd(), molName + "_all.cif")
        out = FileWriter(cifFile)
        MMcifWriter.writeAll(out, molName)
    elif type == 'pdb':
        # One superimposed PDB per (structure, file) pair.
        for (i, srcFile) in zip(active, files):
            (dirName, baseName) = os.path.split(srcFile)
            newFile = os.path.join(dirName, 'sup_' + baseName)
            molio.savePDB(mol, newFile, i)
def generate(self, idx, abspath):
    """Build an outline tree (Branch/Leaf nodes) for the bindings in a scope."""
    from Analyzer import Analyzer
    from Binding import Binding

    # Resolve the scope: an Analyzer yields the module table for `abspath`;
    # anything else is assumed to already be a scope/table (alt impl.).
    if isinstance(idx, Analyzer):
        mt = idx.loadFile(abspath)
        if mt is None:
            return ArrayList()
        scope = mt.getTable()
        path = abspath
    else:
        scope = idx
        path = abspath

    # Keep only real, user-defined bindings that live in this file.
    candidates = TreeSet()
    for binding in scope.values():
        if binding.isSynthetic() or binding.isBuiltin():
            continue
        if path == binding.getFile():
            candidates.add(binding)
    ordered = sorted(candidates, lambda a, b: a.compareTo(b))

    result = ArrayList()
    for entry in ordered:
        children = None
        if entry.getKind() == Binding.Kind.CLASS:
            classType = entry.getType()
            # For a union type, outline the first class-typed member.
            if classType.isUnionType():
                for member in classType.asUnionType().getTypes():
                    if member.isClassType():
                        classType = member
                        break
            children = self.generate(classType.getTable(), path)
        node = Outliner.Branch() if children is not None else Outliner.Leaf()
        node.setOffset(entry.getStart())
        node.setQname(entry.getQname())
        node.setKind(entry.getKind())
        if children is not None:
            node.setChildren(children)
        result.append(node)
    return result
def __activate__(self, context):
    """
    Keyword autocomplete endpoint.

    Searches the index for keywords matching the optional ``q`` form
    parameter and writes the matching keywords, one per line, as
    plain text to the response.
    """
    formData = context["formData"]
    services = context["Services"]
    response = context["response"]

    # Base query matches any record that has keywords at all.
    query = "keywords:[* TO *]"
    q = formData.get("q")
    if q:
        query += " AND keywords:(%(q)s OR %(q)s*)" % { "q": q }
    req = SearchRequest(query)
    req.setParam("fl", "keywords")
    req.setParam("rows", "50")

    keywords = TreeSet()
    indexer = services.getIndexer()
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    result = SolrResult(ByteArrayInputStream(out.toByteArray()))
    for doc in result.getResults():
        for keyword in doc.getList("keywords"):
            # BUG FIX: when no `q` was supplied, keyword.startswith(None)
            # raised a TypeError. With no prefix, accept every keyword.
            if not q or keyword.startswith(q):
                keywords.add(keyword)

    writer = response.getPrintWriter("text/plain; charset=UTF-8")
    writer.println("\n".join(keywords))
    writer.close()
def __init__(self, ont='', file_path='', uri=''):
    """
    Wrap an OWL ontology.

    Source precedence: an already-loaded ontology object wins, then a
    local file path, then a URI; with no source a warning is issued.
    """
    self.manager = OWLManager.createOWLOntologyManager()
    if ont:
        self.ont = ont
    elif file_path:
        self.ont = self.manager.loadOntologyFromOntologyDocument(
            File(file_path))
    elif uri:
        # check this!
        self.ont = self.manager.loadOntologyFromIRI(IRI.create(uri))
    else:
        # NOTE(review): after this warning self.ont is never assigned,
        # so ontset.add(self.ont) below will fail — same as the original.
        warnings.warn("Constructor failed. Empty args")
    self.factory = self.manager.getOWLDataFactory()
    self.simple_sfp = SimpleShortFormProvider()  # .getShortForm(iri)
    self.simple_iri_sfp = SimpleIRIShortFormProvider()
    ontset = TreeSet()
    ontset.add(self.ont)
    # BidirectionalShortFormProviderAdapter(manager, ontologies, shortFormProvider)
    # Providing the manager means the adapter listens for ontology changes.
    self.bi_sfp = BidirectionalShortFormProviderAdapter(
        self.manager, ontset, self.simple_sfp)  # .getShortForm(iri); .getEntity()
# Calendar.getInstance().get(Calendar.HOUR_OF_DAY),
# Calendar.getInstance().get(Calendar.MINUTE),
# Calendar.getInstance().get(Calendar.SECOND),
# Calendar.getInstance().get(Calendar.MILLISECOND)
# )
ADAPTER_NAME = "Hardware Report"
FILE_SEPARATOR = "\\"
# NOTE(review): name keeps the historical "CUTSOMER" typo on purpose —
# other scripts may reference this constant by name.
C__FILTERED_CUTSOMER_CLASSIFICATION = ("Green", "Blue")
# BUG FIX: ("in production") was just a parenthesized string, not a tuple,
# so `x in C__FILTERED_SYSTEM_STATUS` performed substring matching. The
# trailing comma makes it a 1-tuple, consistent with the sibling constant.
C__FILTERED_SYSTEM_STATUS = ("in production",)

##############################################################################
# GLOBALS
##############################################################################
g__NodeWithSWandCPUQty = 0
g__SortedData = TreeSet()

# Base directory of this adapter's configuration under the probe manager.
adapterConfigBaseDir = "%s%s%s%s" % (
    CollectorsParameters.BASE_PROBE_MGR_DIR,
    CollectorsParameters.getDiscoveryConfigFolder(),
    FILE_SEPARATOR,
    ADAPTER_NAME)


def quote(stringin):
    """Wrap the string form of *stringin* in double quotes."""
    return '"' + str(stringin) + '"'


class CPUData:
    def __init__(self):
        # CPU description fields, initialised empty.
        self.cputype = ''
        self.cpuspeed = ''
def __init__(self, name):
    """Create a named entry with no config id and an empty sorted cluster set."""
    self.name = name
    self.configid = None  # no configuration id known yet
    self.clusters = TreeSet()
coregroups = TreeMap() for i in range(numberOfCoreGroups): coregroupname = "BridgedCoreGroup" + str(i) if (i == 0): coregroupname = "DefaultCoreGroup" if (proxyCoreGroup == 1 and i == 1): coregroupname = "BridgedCoreGroupODR" coregroups.put(coregroupname, CoreGroup(coregroupname)) # # process all servers # servers = TreeMap() clusters = TreeMap() coregroupBridges = TreeSet() print "Retrieving existing coregroups" coregroupIds = convertToList(AdminConfig.list("CoreGroup")) for coregroup in coregroupIds: print "Existing coregroup: " + AdminConfig.showAttribute(coregroup, "name") cgname = AdminConfig.showAttribute(coregroup, "name") cg = coregroups.get(cgname) #if full reconfigure is selected, existing coregroups will be deleted and not be added to the coregroups list if (fullReconfigure == 0): if (cg == None): cg = CoreGroup(cgname) coregroups.put(cgname, cg) if (cg != None): cg.configid = coregroup cgservers = convertToList(AdminConfig.list("CoreGroupServer", coregroup)) for cgs in cgservers:
def getSortedKeySet(self):
    """Return the metadata JSON object's keys as a sorted TreeSet."""
    json_keys = self.metadata.getJsonObject().keySet()
    return TreeSet(json_keys)
out = renderResponse.getPortletOutputStream()
custom = Custom(renderRequest, renderResponse)

out.write("""
<b>Using the JDK</b>
<br /><br />
The following set has been generated with java.util.TreeSet.
<br /><br />
\n
""")

# java.util.TreeSet keeps its elements in sorted order.
# (local renamed from `set` to avoid shadowing the Python builtin)
words = TreeSet()
for word in ("foo", "Bar", "baz"):
    words.add(word)
for v in words:
    out.write(v + "\n")

out.write("""
<br /><br />
<b>Invoking Liferay Services</b>
<br /><br />
\n
""")
from com.xebialabs.deployit.plugin.api.udm.base import BaseDeployable
from com.xebialabs.deployit.plugin.api.reflect import Type

# Request parameters: type to migrate, application id, old and new versions.
oldType = request.query['p1']
appId = request.query['p2']
oldVer = request.query['p3']
newVer = request.query['p4']

# The per-type migrator module lives under typeMigrator.<oldType>.
migratorModule = __import__('typeMigrator.' + oldType, fromlist=[''])

# Create a new deployment package as a copy of the old one
repositoryService.copy("%s/%s" % (appId, oldVer), "%s/%s" % (appId, newVer))

# Read the new deployment package back
app = repositoryService.read("%s/%s" % (appId, newVer))

# Replace every deployable of the old type with a migrated copy.
result = []
for item in app.getProperty('deployables'):
    deployable = repositoryService.read(item.id)
    if deployable.type != oldType:
        continue
    newDeployable = BaseDeployable()
    newId = "%s/%s/%s" % (appId, newVer, "New" + deployable.name)
    newDeployable.setId(newId)
    newDeployable.setTags(TreeSet())
    migratorModule.mapProperties(deployable, newDeployable)
    result.append(newDeployable.getType().toString())
    repositoryService.create(newId, newDeployable)
    repositoryService.delete(deployable.id)

response.entity = result
def __init__(self, tableOidOffset):
    """Create a query whose elements are ordered by their OID offset."""
    self.tableOidOffset = tableOidOffset
    # Elements sorted via OffsetBasedComparator.
    self.queryElements = TreeSet(OffsetBasedComparator())
def __init__(self, objectName):
    """
    Create a query against a single CIM class.

    @param objectName: the name of the queried CIM class
    """
    self.objectName = objectName
    self.whereClause = None
    # Elements sorted via ColumnNameBasedComparator.
    self.queryElements = TreeSet(ColumnNameBasedComparator())