def findMessages(mc,custid,count): print 'findMessages() find ',count amsm = mc.getActiveMailboxStoreManager() ids = HashSet() msgs = HashSet() retries = 30 while msgs.size() < count and retries > 0: for p in mc.getPartitionManager().listPartitions(): if p.isReadOnly(): continue print 'searching for messages to be stored in',p tm = amsm.findMessages([SearchConstraint(IActiveMailboxStoreManager.PROP_CUST_ID, SearchConstraintOperator.CONSTRAINT_EQUALS,int(custid))],p) if tm.size() > 0: msgs.addAll(filter(lambda x : not ids.contains(x.getMessageId()),tm)) ids.addAll(map(lambda x : x.getMessageId(), tm)) if msgs.size() < count: time.sleep(10) retries = retries - 1 print 'findMessages found',msgs.size(),'ids',ids.size() if msgs.isEmpty(): print 'Failed to find any messages in DB' raise Exception('Failed to find any messages in DB') if msgs.size() < count: print 'Warning, did not find all messages expected' return msgs
def getNodeObjects(api, filters):
    # Query the NNM 'Node' web service once per filter and collect the
    # results into a dict of UNode objects keyed by node id. Duplicate ids
    # are skipped; querying stops at the first filter that returns nothing.
    found = 0
    ndMap = {}
    ndSet = HashSet()
    lanSwitchCapability = "com.hp.nnm.capability.node.lanswitching"
    ipForwardingCapability = "com.hp.nnm.capability.node.ipforwarding"
    try:
        ndStub = api.getStub(NnmServicesEnum().Node)
        for flt in filters:
            allNodes = ndStub.getNodes(flt).getItem()
            if allNodes == None:
                # a filter with no results ends the whole query loop
                break
            found = 1
            logger.debug("Retrieved %s Node Objects" % (len(allNodes)))
            for node in allNodes:
                # require the minimal identifying fields to be present
                if not (notNull(node.getId()) and notNull(node.getName()) and notNull(node.getCreated()) and notNull(node.getModified())):
                    continue
                ## Don't add duplicate Nodes
                if ndSet.contains(node.getId()):
                    continue
                ndSet.add(node.getId())
                # The capabilities com.hp.nnm.capability.node.lanswitching and
                # com.hp.nnm.capability.node.ipforwarding have replaced
                # isLanSwitch and isIPv4Router respectively.
                isLanSwitch = 0
                isRouter = 0
                caps = node.getCapabilities()
                if notNull(caps):
                    for cap in caps:
                        key = cap.getKey().strip()
                        if key == lanSwitchCapability:
                            isLanSwitch = 1
                        if key == ipForwardingCapability:
                            isRouter = 1
                ndMap[node.getId()] = UNode(node.getId(), node.getName(), isRouter, isLanSwitch, node.getSystemName(), node.getSystemContact(), node.getSystemDescription(), node.getSystemLocation(), node.getSystemObjectId(), node.getLongName(), node.getSnmpVersion(), node.getDeviceModel(), node.getDeviceVendor(), node.getDeviceFamily(), node.getDeviceDescription(), node.getDeviceCategory(), '', '')
    except:
        # boundary handler: log and report, then fall through with whatever
        # was collected so far
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        errMsg = 'Exception:\n %s' % stacktrace
        logger.error(errMsg)
        api.Framework.reportWarning(errMsg)
    if found:
        logger.debug('Created a dictionary of %d Node objects' % (len(ndMap)))
    else:
        errMsg = 'Did not find any Node objects'
        logger.debug(errMsg)
        api.Framework.reportWarning(errMsg)
    return ndMap
def sliceSpaces( spaces):
    # Partition `spaces` into vertical slices. A slice created from index
    # `idx` marks all of its tiles as consumed, so later spaces already
    # belonging to that slice do not start a new one.
    consumed = HashSet()
    result = []
    for idx, space in enumerate(spaces):
        if consumed.contains(space):
            continue
        vslice = MeshMaker.createNewVerticalSlice(spaces, idx)
        consumed.addAll(vslice.tiles)
        result.append(vslice)
    return result
def test_HashSet(self):
    """create HashSet in JVM (from the JavaSet) """
    # Copy-construct a java.util.HashSet from the existing Java set and
    # verify that size and membership survive the copy.
    copied = HashSet(self.javaSet)
    expectedSize = self.javaSet.size()
    self.assertEqual(expectedSize, copied.size(), "HashSet has same size")
    sample = list(self.testSet)[0]
    self.assertTrue(copied.contains(sample))
def test_HashSet(self):
    """create HashSet in JVM (from the JavaSet) """
    # NOTE(review): this method appears twice in the file; if both copies
    # live in the same class the later definition shadows the earlier one —
    # confirm the duplication is intentional.
    jset = self.javaSet
    built = HashSet(jset)
    self.assertEqual(jset.size(), built.size(), "HashSet has same size")
    first = list(self.testSet)[0]
    self.assertTrue(built.contains(first))
def lopUselessLeaves(cls, pn):
    """ generated source for method lopUselessLeaves

    Remove propnet components that can never influence the terminal, goal
    or legal propositions (the init proposition is also kept), by flooding
    backwards from those "useful" roots through each component's inputs.
    """
    usefulComponents = HashSet()
    toAdd = Stack()
    toAdd.add(pn.getTerminalProposition())
    usefulComponents.add(pn.getInitProposition())
    for goalProps in pn.getGoalPropositions().values():
        toAdd.addAll(goalProps)
    for legalProps in pn.getLegalPropositions().values():
        toAdd.addAll(legalProps)
    while not toAdd.isEmpty():
        # BUGFIX: the machine translation dropped the statement that takes
        # the next element off the worklist; without it `curComp` is
        # undefined (NameError) and the loop could never make progress.
        curComp = toAdd.pop()
        if usefulComponents.contains(curComp):
            continue
        usefulComponents.add(curComp)
        # everything feeding a useful component is itself useful
        toAdd.addAll(curComp.getInputs())
    # iterate a snapshot because removeComponent mutates pn's component set
    allComponents = ArrayList(pn.getComponents())
    for c in allComponents:
        if not usefulComponents.contains(c):
            pn.removeComponent(c)
def getProducibleVars(self, sentence):
    """ generated source for method getProducibleVars

    Return the set of variables in `sentence` that occur exactly once and
    only in dependent slots (per self.dependentSlots) — i.e. variables whose
    value this constant form can produce.
    Raises RuntimeException if `sentence` does not match self.form.
    """
    if not self.form.matches(sentence):
        raise RuntimeException("Sentence " + sentence + " does not match constant form")
    tuple_ = GdlUtils.getTupleFromSentence(sentence)
    candidateVars = HashSet()
    # Variables that appear multiple times go into multipleVars
    multipleVars = HashSet()
    # ...which, of course, means we have to spot non-candidate vars
    nonCandidateVars = HashSet()
    i = 0
    while i < len(tuple_):
        # BUGFIX: the machine translation dropped the binding of the current
        # tuple element; without it `term` (and `var`) are undefined
        # (NameError). The Java original read the i-th term and cast it to a
        # variable inside the isinstance branch.
        term = tuple_[i]
        if isinstance(term, (GdlVariable,)) and not multipleVars.contains(term):
            var = term
            if candidateVars.contains(var) or nonCandidateVars.contains(var):
                # second sighting: the variable is repeated, so it can no
                # longer be a candidate
                multipleVars.add(var)
                candidateVars.remove(var)
            elif self.dependentSlots.get(i):
                candidateVars.add(var)
            else:
                nonCandidateVars.add(var)
        i += 1
    return candidateVars
def getTopologicalOrdering(cls, forms, dependencyGraph, usingBase, usingInput):
    """ generated source for method getTopologicalOrdering

    Topologically sorts `forms` so each form follows every form it depends
    on (per `dependencyGraph`), with extra ordering constraints for
    base/input-related forms when `usingBase`/`usingInput` are set.
    """
    queue = LinkedList(forms)
    ordering = ArrayList(len(forms))
    # forms already emitted into `ordering`
    alreadyOrdered = HashSet()
    while not queue.isEmpty():
        # NOTE(review): `curForm` and `readyToAdd` are never assigned in this
        # machine-translated body — the original presumably started each
        # iteration with `curForm = queue.remove()` and `readyToAdd = True`.
        # As written this raises NameError; confirm against the generating
        # source before fixing.
        for dependency in dependencyGraph.get(curForm):
            # a form may depend on itself; only *other* unmet dependencies
            # block emission
            if not dependency == curForm and not alreadyOrdered.contains(dependency):
                readyToAdd = False
                break
        # true/next/init forms additionally wait for the base form
        if usingBase and (curForm.__name__ == cls.TRUE or curForm.__name__ == cls.NEXT or curForm.__name__ == cls.INIT):
            # NOTE(review): `baseForm` is also undefined here — TODO confirm
            # where it comes from in the generating source.
            if not alreadyOrdered.contains(baseForm):
                readyToAdd = False
        # does/legal forms additionally wait for the input form
        if usingInput and (curForm.__name__ == cls.DOES or curForm.__name__ == cls.LEGAL):
            # NOTE(review): `inputForm` is also undefined here — TODO confirm.
            if not alreadyOrdered.contains(inputForm):
                readyToAdd = False
        if readyToAdd:
            ordering.add(curForm)
            alreadyOrdered.add(curForm)
        else:
            # not ready: requeue at the back and retry later
            queue.add(curForm)
        # allow cooperative cancellation between iterations
        ConcurrencyUtils.checkForInterruption()
    return ordering
def updateDomains(self):
    """ generated source for method updateDomains

    Iteratively grows per-relation domains: rules whose conditions touch a
    domain updated in the previous pass are re-evaluated, and any new
    assignments they produce are added until a fixed point is reached.

    NOTE(review): this machine-translated body references several names that
    are never assigned in it (`domains` vs self.domains, `containsUpdatedDomain`,
    `rulesConsidered`, `instantiations`, `a`, `var`, `isVar`,
    `currUpdatedDomains`, `findSatisfyingInstantiations`). As written it
    raises NameError; the original per-iteration initializations were lost
    in translation and must be restored from the generating source.
    """
    changedSomething = True
    itrNum = 0
    # domains touched in the previous iteration; only rules that read one
    # of these need re-checking
    lastUpdatedDomains = HashSet(self.domains.values())
    while changedSomething:
        GamerLogger.log("StateMachine", "Beginning domain finding iteration: " + itrNum)
        changedSomething = False
        for d in domains.values():
            for ruleRef in d.ruleRefs:
                # skip rules none of whose conditions read an updated domain
                for c in ruleRef.conditions:
                    if lastUpdatedDomains.contains(c.dom):
                        containsUpdatedDomain = True
                        break
                if not containsUpdatedDomain:
                    continue
                rulesConsidered += 1
                # build one assignment tuple per satisfying instantiation
                for instantiation in instantiations:
                    for t in ruleRef.productionTemplate:
                        if isinstance(t, (GdlConstant, )):
                            a.add(t)
                        else:
                            a.add(instantiation.get(var))
                    if not d.assignments.contains(a):
                        currUpdatedDomains.add(d)
                        d.assignments.add(a)
                        changedSomething = True
                        d.addAssignmentToIndex(a)
                # a fully-constant production can fire even with no
                # instantiations
                if len(instantiations) == 0:
                    findSatisfyingInstantiations(ruleRef)
                    for t in ruleRef.productionTemplate:
                        if isinstance(t, (GdlConstant, )):
                            a.add(t)
                        else:
                            isVar = True
                            break
                    if not isVar and not d.assignments.contains(a):
                        currUpdatedDomains.add(d)
                        d.assignments.add(a)
                        changedSomething = True
                        d.addAssignmentToIndex(a)
        itrNum += 1
        lastUpdatedDomains = currUpdatedDomains
    GamerLogger.log("StateMachine", "\tDone with iteration. Considered " + rulesConsidered + " rules.")
def getIPAddressObjects(api, filters):
    # Query the NNM 'IPAddress' web service once per filter and collect the
    # results into a dict of UIp objects keyed by IP id. Duplicate ids are
    # logged and skipped; querying stops at the first filter that returns
    # nothing.
    found = 0
    ipMap = {}
    ipSet = HashSet()
    try:
        ipStub = api.getStub(NnmServicesEnum().IPAddress)
        for flt in filters:
            allIps = ipStub.getIPAddresses(flt).getItem()
            if allIps == None:
                # a filter with no results ends the whole query loop
                break
            found = 1
            logger.debug("Retrieved %s IPAddress Objects" % (len(allIps)))
            for ip in allIps:
                # require the minimal identifying fields to be present
                if not (notNull(ip.getId()) and notNull(ip.getHostedOnId()) and notNull(ip.getIpValue()) and notNull(ip.getCreated()) and notNull(ip.getModified())):
                    continue
                ## Don't add duplicate IPs
                if ipSet.contains(ip.getId()):
                    logger.debug("########Found duplicate IP" + ip.getIpValue())
                    continue
                ipSet.add(ip.getId())
                ipMap[ip.getId()] = UIp(ip.getId(), ip.getHostedOnId(), ip.getIpSubnetId(), ip.getInInterfaceId(), ip.getIpValue(), ip.getPrefixLength(), ip.getCreated(), ip.getModified())
    except:
        # boundary handler: log and report, then fall through with whatever
        # was collected so far
        stacktrace = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
        errMsg = 'Exception:\n %s' % stacktrace
        logger.error(errMsg)
        api.Framework.reportError(errMsg)
    if found:
        logger.debug('Created a dictionary of %d IPAddress objects' % (len(ipMap)))
    else:
        errMsg = 'Did not find any IPAddress objects'
        logger.debug(errMsg)
        api.Framework.reportError(errMsg)
    return ipMap
def getTopologicalOrdering(cls, forms, dependencyGraph):
    """ generated source for method getTopologicalOrdering

    Kahn-style topological sort of `forms`: a form is emitted only once
    every form it depends on (other than itself) has been emitted.
    """
    # We want each form as a key of the dependency graph to
    # follow all the forms in the dependency graph, except maybe itself
    queue = LinkedList(forms)
    ordering = ArrayList(len(forms))
    alreadyOrdered = HashSet()
    while not queue.isEmpty():
        # BUGFIX: the machine translation dropped the two statements that
        # start each iteration; without them `curForm` and `readyToAdd`
        # are undefined (NameError).
        curForm = queue.remove()
        readyToAdd = True
        # Don't add if there are dependencies
        for dependency in dependencyGraph.get(curForm):
            if not dependency == curForm and not alreadyOrdered.contains(dependency):
                readyToAdd = False
                break
        # Add it
        if readyToAdd:
            ordering.add(curForm)
            alreadyOrdered.add(curForm)
        else:
            # not ready yet: requeue at the back and retry later
            queue.add(curForm)
        # TODO: Add check for an infinite loop here
        # Or replace with code that does stratification of loops
    return ordering
def asSequence(nds):
    """ Take a list of nodes that are known to define a branch and return
    a list of nodes sorted by parent-child. """
    # Branches of one or two nodes are already trivially ordered.
    if len(nds) in (1, 2):
        return nds
    endpoints = []
    branch = HashSet(nds)
    for node in branch:
        inBranchChildren = node.getChildren().keySet()
        # NOTE(review): retainAll mutates the node's own children key set,
        # not a copy — confirm this pruning side effect is intended.
        inBranchChildren.retainAll(branch)
        # An endpoint is either a node whose parent is in the branch but
        # with no in-branch children (the tail), or a node whose parent is
        # outside the branch but with exactly one in-branch child (the head).
        if branch.contains(node.parent):
            if 0 == inBranchChildren.size():
                endpoints.append(node)
        elif 1 == inBranchChildren.size():
            endpoints.append(node)
    return Node.findPath(endpoints[0], endpoints[1])
class ProcessToProcess:
    """Builds process-to-process (P2P) network topology from the probe's
    aggregated netflow tables (Agg_V5 joined to Port_Process) and reports
    hosts, processes, service addresses and client-server links through the
    discovery Framework.
    """
    # Probe-database connection context name.
    CONTEXT = 'processTOprocess'
    # Joins each aggregated connection to the local (lpr) and, when known,
    # remote (rpr) listening process. The two '?' placeholders are the host
    # id and the "include local connections" flag (see getPreparedStatement).
    # NOTE(review): the left-join condition compares lpr.Protocol (not
    # rpr.Protocol) to agg.Prot — looks like a copy/paste slip; confirm
    # against the schema before changing.
    P2PSQL = ''' select SrcAddr, SrcPort, Prot, lpr.listen SrcListen, lpr.hostid srchid, lpr.pid srcpid, lpr.processname srcname, rpr.hostid dsthid, rpr.pid dstpid, DstAddr, DstPort, rpr.listen DstListen, rpr.processname dstname from Agg_V5 agg join Port_Process lpr on lpr.hostid = agg.hostid and lpr.ipaddress = agg.SrcAddr and lpr.port = agg.SrcPort and lpr.Protocol = agg.Prot left join Port_Process rpr on rpr.ipaddress = agg.DstAddr and rpr.port = agg.DstPort and lpr.Protocol = agg.Prot where agg.hostid = ? and (? or SrcAddr <> DstAddr) and ((rpr.hostid is null) or (lpr.hostid <> rpr.hostid) or (lpr.pid < rpr.pid)) order by srcpid '''
    #explanation for where not ((lpr.hostid = rpr.hostid) and (lpr.pid > rpr.pid)):
    #we check for specific hostid (srchid),we know that all connections in table agg_v5 are symmetrical and order result by srcpid
    # Fetches the full process records for every process that participates
    # in at least one P2P connection found by P2PSQL.
    PROCESS_SQL = ''' with p2p as ( %s ) select hostid, pid, name, cmdline, path, params, owner, startuptime from processes where (hostid, pid) in ( select distinct srchid hostid, srcpid pid from p2p union select distinct dsthid hostid, dstpid pid from p2p where dsthid is not null ) ''' % P2PSQL

    def __init__(self, Framework):
        """Reads job parameters, opens the probe DB connection and builds
        the ignored-process filter set."""
        self.Framework = Framework
        self.conn = self.Framework.getProbeDatabaseConnection(ProcessToProcess.CONTEXT)
        self.knownPortsConfigFile = self.Framework.getConfigFile(CollectorsParameters.KEY_COLLECTORS_SERVERDATA_PORTNUMBERTOPORTNAME)
        # when true, connections where SrcAddr == DstAddr are excluded by
        # the SQL's second placeholder
        self.shouldIgnoreLocal = Boolean.parseBoolean(self.Framework.getParameter('ignoreP2PLocalConnections'))
        # None means "no restriction" for both port sets
        self.knownListeningPorts = self.getKnownListeningPortsSet()
        self.requestedServices = self.getRequestedPortsSet()
        self.hostID = Framework.getDestinationAttribute('hostId')
        # lower-cased process names that must not be reported
        self.ignoredProcesses = HashSet()
        # (hostid, pid) key -> Process, filled by buildProcessMap()
        self.processMap = {}
        self.getProcessesToFilter()

    def getProcessesToProcess(self):
        """Main entry point: iterates every discovered connection and reports
        topology, choosing link direction from listen flags (or preferred-
        service heuristics when neither side is known to listen)."""
        if not self.shouldRun():
            return
        rs = None
        try:
            try:
                self.buildProcessMap()
                st = self.getPreparedStatement(ProcessToProcess.P2PSQL)
                logger.debug(st)
                rs = st.executeQuery()
                while(rs.next()):
                    SrcListen = rs.getBoolean('SrcListen')
                    DstListen = rs.getBoolean('DstListen')
                    # the listening side is the server; buildTcpConnTopology
                    # takes (client, server) prefixes
                    if SrcListen and (not DstListen):
                        self.buildTcpConnTopology(rs, 'dst', 'src')
                    elif DstListen and (not SrcListen):
                        self.buildTcpConnTopology(rs, 'src', 'dst')
                    else:
                        # neither (or both) sides listen: fall back to the
                        # known-ports / oracle-process heuristic
                        srcPrefered = self.isPreferedService(rs, 'src')
                        dstPrefered = self.isPreferedService(rs, 'dst')
                        if srcPrefered and (not dstPrefered):
                            self.buildTcpConnTopology(rs, 'dst', 'src')
                        elif dstPrefered and (not srcPrefered):
                            self.buildTcpConnTopology(rs, 'src', 'dst')
                        else:
                            # we don't known which endpoint is listening,
                            # so we can't set the link direction
                            srcip = rs.getString('srcAddr')
                            srcport = rs.getInt('srcPort')
                            dstip = rs.getString('dstAddr')
                            dstport = rs.getInt('dstPort')
                            connString = '%s:%d %s:%d' % (srcip, srcport, dstip, dstport)
                            logger.warn('process to process topology: ' 'Listen endpoint is unknown, skipping %s' % connString)
            except:
                error = 'Failed to fetch processes to process communication'
                logger.errorException(error)
                errobj = errorobject.createError(errorcodes.PROCESS_TO_PROCESS_FAILED, None, error)
                logger.reportErrorObject(errobj)
        finally:
            # best-effort cleanup of the result set, then the connection
            if rs:
                try:
                    rs.close()
                except:
                    pass
            self.conn.close()

    def buildProcessMap(self):
        """Loads every process involved in a P2P connection into
        self.processMap, keyed by Process.getMapKey()."""
        st = self.getPreparedStatement(ProcessToProcess.PROCESS_SQL)
        logger.debug('Build process map by SQL:', st)
        rs = None
        try:
            rs = st.executeQuery()
            while (rs.next()):
                name = rs.getString('name')
                hostid = rs.getString('hostid')
                pid = rs.getInt('pid')
                cmdline = rs.getString('cmdline')
                path = rs.getString('path')
                params = rs.getString('params')
                owner = rs.getString('owner')
                startuptime = rs.getLong('startuptime')
                process = Process(hostid, pid, name, cmdline, path, params, owner, startuptime)
                self.processMap[process.getMapKey()] = process
            logger.debug(len(self.processMap), ' processes loaded.')
        finally:
            if rs:
                try:
                    rs.close()
                except:
                    pass
            self.conn.close(st)

    def getPreparedStatement(self, sql):
        """Prepares `sql` binding host id (1) and the include-local flag (2)."""
        st = self.conn.prepareStatement(sql)
        st.setString(1, self.hostID)
        st.setBoolean(2, not self.shouldIgnoreLocal)
        return st

    def getProcess(self, hostid, pid):
        """ @type hostid :str
        @type pid :int
        @rtype Process
        Returns the cached Process for (hostid, pid), or None. """
        if hostid and pid:
            return self.processMap.get(Process.buildMapKey(hostid, pid))

    def buildTcpConnTopology(self, rs, client, server):
        """Reports host/process/service-address OSHs and the client_server
        link for one connection row; `client`/`server` are the column
        prefixes ('src'/'dst')."""
        serverPortNum = rs.getInt(server + 'Port')
        # honor the P2PServerPorts whitelist, when configured
        if (self.requestedServices != None) and (not self.requestedServices.contains(serverPortNum)):
            return
        [_, serverProc] = self.createHostAndProc(rs, server)
        [clientHost, clientProc] = self.createHostAndProc(rs, client)
        #if not process involved in this tcp connection (hosts not support p2p or are unrichable)
        #we don't report connections between these hosts
        if (clientHost is None) or ((serverProc is None) and (clientProc is None)):
            return
        [serverPort, portName, prot] = self.createServerAddressOsh(rs, server)
        if prot == modeling.TCP_PROTOCOL:
            serviceType = 'TCP'
        else:
            serviceType = 'UDP'
        # prefer linking from the client process; fall back to its host
        link = None
        if clientProc is not None and serverPort is not None:
            link = modeling.createLinkOSH('client_server', clientProc, serverPort)
        elif clientHost is not None and serverPort is not None:
            link = modeling.createLinkOSH('client_server', clientHost, serverPort)
        if not link:
            return
        link.setAttribute('clientserver_protocol',serviceType)
        if portName is not None:
            link.setAttribute('data_name', portName)
        link.setLongAttribute('clientserver_destport', serverPortNum)
        self.Framework.sendObject(serverPort)
        if clientProc is not None:
            self.Framework.sendObject(clientProc)
        self.Framework.sendObject(link)
        #server process and its link are interesting only if we have client connected to its server port
        if serverProc is not None:
            link = modeling.createLinkOSH('use', serverProc, serverPort)
            self.Framework.sendObject(serverProc)
            self.Framework.sendObject(link)

    def createHostOsh(self, rs, prefix):
        """Builds a host OSH for the `prefix` ('src'/'dst') endpoint, by IP
        when valid, else by CMDB id; returns (hostOsh-or-None, hostid)."""
        hid = rs.getString(prefix + 'hid')
        ipaddr = rs.getString(prefix + 'Addr')
        if ipaddr and netutils.isValidIp(ipaddr):
            hostOsh = modeling.createHostOSH(ipaddr, filter_client_ip=True)
        elif hid:
            hostOsh = modeling.createOshByCmdbIdString('host', hid)
        else:
            logger.debug('Not enough info to create host from network connection data %s' % prefix)
            hostOsh = None
        return hostOsh, hid

    def createServerAddressOsh(self, rs, prefix):
        """Builds the service-address OSH for the server endpoint; returns
        [saOsh, portName, prot] or [None, None, None] when no host OSH can
        be built."""
        ipaddr = rs.getString(prefix + 'Addr')
        port = rs.getInt(prefix + 'Port')
        portName = self.getPortName(port)
        [hostOsh, _] = self.createHostOsh(rs, prefix)
        if not hostOsh:
            return [None, None, None]
        prot = rs.getInt('Prot')
        if prot == modeling.TCP_PROTOCOL:
            serviceType = modeling.SERVICEADDRESS_TYPE_TCP
        else:
            serviceType = modeling.SERVICEADDRESS_TYPE_UDP
        saOsh = modeling.createServiceAddressOsh(hostOsh, ipaddr, port, serviceType, portName)
        return [saOsh, portName, prot]

    def createHostAndProc(self, rs, prefix):
        """Returns [hostOsh, procOsh] for the endpoint; procOsh is None when
        the process is unknown or its name is in the ignore list."""
        [hostOsh, hostid] = self.createHostOsh(rs, prefix)
        if not hostOsh:
            return [None, None]
        pid = rs.getInt(prefix + 'pid')
        process = self.getProcess(hostid, pid)
        procOsh = None
        if process:
            if process.name:
                processName = process.name
            else:
                # fall back to the name captured with the connection row
                processName = rs.getString(prefix + 'name')
                process.name = processName
            if processName and (not self.ignoredProcesses.contains(processName.lower())):
                procOsh = process.buildOsh(hostOsh)
        return [hostOsh, procOsh]

    def getPortName(self, port):
        """Resolves a port number to its configured name, defaulting to the
        number itself as a string."""
        portName = self.knownPortsConfigFile.getTcpPortName(port)
        if portName is None:
            portName = str(port)
        return portName

    def isPreferedService(self, rs, prefix):
        """Heuristic used when listen flags are inconclusive: True when the
        endpoint's port is a known listening port (or, with '*', any named
        port), or its process name contains 'oracle'."""
        port = rs.getInt(prefix + 'Port')
        if self.knownListeningPorts != None and self.knownListeningPorts.contains(port):
            return True
        if self.knownListeningPorts != None and self.knownListeningPorts.contains('*'):
            return not (self.knownPortsConfigFile.getTcpPortName(port) is None)
        processName = rs.getString(prefix + 'name')
        if not processName:
            hostid = rs.getString(prefix + 'hid')
            pid = rs.getInt(prefix + 'pid')
            process = self.getProcess(hostid, pid)
            if process:
                processName = process.name
        return processName and processName.lower().find('oracle') > -1

    def getRequestedPortsSet(self):
        """Parses the 'P2PServerPorts' parameter into a set of port numbers;
        None means report every server port."""
        services = self.Framework.getParameter('P2PServerPorts')
        if logger.isDebugEnabled():
            logger.debug('Requested services:', services)
        if (services == None) or (len(services) == 0) or (services == '*'):
            return None
        names = services.split(',')
        portsSet = HashSet()
        for name in names:
            # each entry is either a configured service name or a literal
            # port number
            portNums = self.knownPortsConfigFile.getPortByName(name)
            if portNums == None:
                try:
                    portNums = [Integer.parseInt(name)]
                except:
                    logger.debug('Failed to resolve service port number:', name)
                    continue
            for portNum in portNums:
                portsSet.add(portNum)
        return portsSet

    def getKnownListeningPortsSet(self):
        """Parses the 'knownListeningPorts' parameter into a set of port
        numbers; None means no known ports, a set containing '*' means
        "any named port" (see isPreferedService)."""
        ports = self.Framework.getParameter('knownListeningPorts')
        portsSet = HashSet()
        if logger.isDebugEnabled():
            logger.debug('Known Listening Ports:', ports)
        if (ports == None) or (len(ports) == 0):
            return None
        if (ports == '*'):
            portsSet.add('*')
            return portsSet
        names = ports.split(',')
        for name in names:
            portNums = self.knownPortsConfigFile.getPortByName(name)
            if portNums == None:
                try:
                    portNums = [Integer.parseInt(name)]
                except:
                    logger.debug('Failed to resolve service port number:', name)
                    continue
            for portNum in portNums:
                portsSet.add(portNum)
        return portsSet

    def shouldRun(self):
        """The job is skipped only when 'filterP2PProcessesByName' is '*'
        (filter out everything)."""
        filterProcesses = self.Framework.getParameter('filterP2PProcessesByName')
        return (filterProcesses == None) or (filterProcesses != '*')

    def getProcessesToFilter(self):
        """Fills self.ignoredProcesses with the lower-cased names listed in
        the 'filterP2PProcessesByName' parameter."""
        filterProcesses = self.Framework.getParameter('filterP2PProcessesByName')
        if filterProcesses != None:
            self.ignoredProcesses = HashSet()
            for procName in filterProcesses.split(','):
                if len(procName) > 0:
                    self.ignoredProcesses.add(procName.lower())
# find out which partitions are purging cmd = "ls -alt /ems/shared/purge/*/*.log | awk {' print $9 }'" pids = HashSet() for line in os.popen(cmd): line = line.strip() if line.startswith("/ems/shared/purge/") and line.endswith(".log"): pid = int(line[len("/ems/shared/purge/"):line.rfind('/')]) pids.add(pid) purging = [] nonPurging = [] for p in pList: if ( p.getStorageType() == IStorageLocationManager.STORAGE_TYPE_DX ): continue if ( pids.contains(p.getId()) ): purging.append(p) else: nonPurging.append(p) conn = mc.getDBConnection(ManagementContainer.AM_POOL_NAME) needsRollback = True try: # for each purging partition, set to 100% for p in purging: needsRollback = True if ( p.getMinFreePercent() != 100 ): p.setMinFreePercent(100) pMgr.updatePartition(p,conn) conn.commit()