def testRocketFuel3967():
    filepath = '../../../graphs/rocketfuel/3967/weights.intra'
    g, lli = utils.textToG(filepath, useInt=False)
    weighted = True
    testCases = (
        ('317', '431',
         '010011100011001010001010001100101001001000110000100101001000',),
        )
    failureSrcDstPairs = []
    for s, d, expectedString in testCases:
        expectedEncodingBs = bitstring.BitString('0b' + expectedString)
        pp, dp = approach2.getDg(g, s, d, weighted)
##        dag = approach2.getDagWithVnodes(
##            pp, dp, returnDetourPathsWithVNodes=False)
        encodingBs = encode(
            pp, dp, lli, s, d, returnActualEncoding=True,
            roundUpToMultipleBits=1)
        if encodingBs != expectedEncodingBs:
            failureSrcDstPairs.append((s, d))
            pass
        pass
    func_name = inspect.getframeinfo(inspect.currentframe())[2]
    print 'Test', func_name, 'result:'
    if len(failureSrcDstPairs) == 0:
        print '  passed'
        pass
    else:
        print '  failed'
        print '  The failed src-dst pairs:'
        for s, d in failureSrcDstPairs:
            print 's,d=%s,%s' % (repr(s), repr(d))
            pass
        pass
    return
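# A possible maintenance helper for the test table above -- a sketch, not
# part of the original test. It assumes the same utils/approach2/encode()
# imports, and that encode() with returnActualEncoding=True returns a
# bitstring.BitString (which the comparison above implies). Paste the
# printed bits into testCases (drop any '0b' prefix, depending on the
# bitstring version).
def printExpectedEncoding(filepath, s, d, weighted=True):
    g, lli = utils.textToG(filepath, useInt=False)
    pp, dp = approach2.getDg(g, s, d, weighted)
    bs = encode(pp, dp, lli, s, d, returnActualEncoding=True,
                roundUpToMultipleBits=1)
    print bs.bin
    return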
def evalOneFile(filename, headerLengthThreshold, partialResultSize,
                outputDir, numberOfPairsToTry=None, pairIsOrdered=False,
                weighted=False, srcStartIdx=None, srcEndIdx=None,
                ):
    def addHdrLen(countsDict, hdrlen):
        if hdrlen in countsDict:
            countsDict[hdrlen] += 1
            pass
        else:
            countsDict[hdrlen] = 1
            pass
        return

    def evalOnePair(s, d, hdrLenCounts, pairsWithLargeEncodings):
        pp, dp = approach2.getDg(g, s, d, weighted)
        if (not pp):
            print 'no path: s,d="%s","%s"' % (s, d)
            return
        if dp == None:
            dp = {}
            pass
        hdrLen4 = codec4.encode(
            pp, dp, lli, s, d, roundUpToMultipleBits=8)
        if hdrLen4 > headerLengthThreshold:
            pairsWithLargeEncodings.append((s, d))
            pass
        addHdrLen(hdrLenCounts, hdrLen4)
        return
    #########

    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False)
    allNodes = tuple(sorted(g.nodes()))
    numNodes = len(allNodes)
    i = 0
    assert (numberOfPairsToTry != None) ^ \
           (srcStartIdx != None or srcEndIdx != None)
    pairsWithLargeEncodings = []
    hdrLenCounts = {}
    if numberOfPairsToTry != None and numberOfPairsToTry > 0:
        srcDstPairs = set()
        while i < numberOfPairsToTry:
            # this is crypto random integer
            idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            while idx2 == idx1:
                idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
                pass
            s, d = allNodes[idx1], allNodes[idx2]
            if (s, d) in srcDstPairs:
                # definitely skip
                print 'pair (%s,%s) already encountered -> skip' % (s, d)
                continue
            elif (d, s) in srcDstPairs:
                # not seen (s,d) yet but seen (d,s), should skip or not?
                if not pairIsOrdered:
                    print 'pairs are not ordered, and (%s,%s) already encountered -> skip' % (d, s)
                    continue
                pass
            # do this so we know we have seen this (s, d) pair
            srcDstPairs.add((s, d))
            print 's,d="%s","%s"' % (s, d)
            evalOnePair(s, d, hdrLenCounts, pairsWithLargeEncodings)
            i += 1
            pass # end while i < numberOfPairsToTry
        pass # end if numberOfPairsToTry > 0
    elif numberOfPairsToTry != None and numberOfPairsToTry <= 0:
        # numberOfPairsToTry is <= 0, so we do all (un-ordered)
        # pairs. the graph'd better be not too big.
        for i in xrange(numNodes):
            for j in xrange(i + 1, numNodes):
                s = allNodes[i]
                d = allNodes[j]
                print 's,d="%s","%s"' % (s, d)
                evalOnePair(s, d, hdrLenCounts, pairsWithLargeEncodings)
                pass
            pass
        pass
    else:
        assert srcStartIdx != None or srcEndIdx != None
        if srcStartIdx == None:
            srcStartIdx = 0
            pass
        assert 0 <= srcStartIdx < numNodes
        if srcEndIdx == None:
            srcEndIdx = numNodes - 1
            pass
        assert 0 <= srcEndIdx < numNodes
        assert srcStartIdx <= srcEndIdx

        numSrcsProcessed = 0
        curPartialSrcRange = []
        for si in xrange(srcStartIdx, srcEndIdx + 1):
            s = allNodes[si]
            if len(curPartialSrcRange) == 0:
                curPartialSrcRange.append(si)
                startdateSecs = int(time.time())
                pass
            for d in allNodes:
                if d != s:
                    evalOnePair(s, d, hdrLenCounts, pairsWithLargeEncodings)
                    pass
                pass
            numSrcsProcessed += 1
            if numSrcsProcessed == partialResultSize:
                # pickle the partial result and re-init the vars
                curPartialSrcRange.append(si)
                enddateSecs = int(time.time())
                pr = PartialResult(
                    filename, startdateSecs, enddateSecs,
                    pairsWithLargeEncodings,
                    hdrLenCounts=hdrLenCounts,
                    srcIdxRange=(curPartialSrcRange[0],
                                 curPartialSrcRange[1]),
                    )
                pickleFilepath = '%s/partialResult_%u_%u' % (
                    outputDir, curPartialSrcRange[0], curPartialSrcRange[1])
                utils.pickleStuff(pickleFilepath, pr)
                # re-init
                del hdrLenCounts, pairsWithLargeEncodings, curPartialSrcRange
                hdrLenCounts = {} # header lengths, in bytes, should be < 256
                pairsWithLargeEncodings = []
                curPartialSrcRange = []
                numSrcsProcessed = 0
                pass
            pass
        # the last bunch might not have reached partialResultSize
        if len(hdrLenCounts) > 0:
            curPartialSrcRange.append(si)
            enddateSecs = int(time.time())
            pr = PartialResult(
                filename, startdateSecs, enddateSecs,
                pairsWithLargeEncodings,
                hdrLenCounts=hdrLenCounts,
                srcIdxRange=(curPartialSrcRange[0], curPartialSrcRange[1]),
                )
            pickleFilepath = '%s/partialResult_%u_%u' % (
                outputDir, curPartialSrcRange[0], curPartialSrcRange[1])
            utils.pickleStuff(pickleFilepath, pr)
            pass
        pass
    return
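# Sketch of reading the per-range partial results back in (assumed, not
# from the original script): relies on utils.unpickleStuff(), which this
# codebase uses elsewhere, and on PartialResult exposing hdrLenCounts as
# an attribute -- plausible given the constructor call above, but not
# guaranteed.
def mergeHdrLenCounts(outputDir):
    merged = {}
    for fname in os.listdir(outputDir):
        if not fname.startswith('partialResult_'):
            continue
        pr = utils.unpickleStuff(outputDir + '/' + fname)
        for hdrlen, count in pr.hdrLenCounts.items():
            merged[hdrlen] = merged.get(hdrlen, 0) + count
            pass
        pass
    return merged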
        elif o == '-o':
            outputFilePath = a
            pass
        elif o == '--weighted':
            weighted = True
            pass
        elif o == '--ordered-pairs':
            pairIsOrdered = True
            pass
        pass
    if numberOfPairsToTry < 0 or inputGraphFile == None \
       or outputFilePath == None:
        usageAndExit(sys.argv[0])
        pass
    g, lli = utils.textToG(inputGraphFile, useInt=False,
                           ignoreWeights=not weighted)
    allNodes = g.nodes()
    numNodes = len(allNodes)
    srcDstPairs = set()
    if numberOfPairsToTry > 0:
        i = 0
        while i < numberOfPairsToTry:
            # this is crypto random integer
            idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            while idx2 == idx1:
                idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
                pass
            # u is at beginning of path
            return path[1] == v
        elif idx == (len(path) - 1):
            # u is at end of path
            return path[-2] == v
        else:
            # u is in middle of path
            return (path[idx-1] == v) or (path[idx+1] == v)
        pass
    except ValueError:
        pass
    return False

filename = '../../../../graphs/rocketfuel/1239/latencies.intra'
g, _ = utils.textToG(filename, useInt=True, ignoreWeights=False)
allNodes = tuple(g.nodes())
numNodes = len(allNodes)
allLinks = g.edges()
for l0 in allLinks:
    for s in allNodes:
        for d in allNodes:
            if s == d:
                continue
            ppath = nx.dijkstra_path(g, s, d)
            if isEdgeInPath(l0, ppath):
def evalOneFile(filename, numberOfPairsToTry, pairIsOrdered=False,
                weighted=False):
    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False)
    allNodes = g.nodes()
    numNodes = len(allNodes)
    i = 0
    srcDstPairResults3 = {}
    if numberOfPairsToTry > 0:
        while i < numberOfPairsToTry:
            # this is crypto random integer
            idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            while idx2 == idx1:
                idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
                pass
            s, d = allNodes[idx1], allNodes[idx2]
            if (s, d) in srcDstPairResults3:
                # definitely skip
                print 'pair (%s,%s) already encountered -> skip' % (s, d)
                continue
            elif (d, s) in srcDstPairResults3:
                # not seen (s,d) yet but seen (d,s), should skip or not?
                if not pairIsOrdered:
                    print 'pairs are not ordered, and (%s,%s) already encountered -> skip' % (d, s)
                    continue
                pass
            # do this so we know we have seen this (s, d) pair
            # init to None (which will mean disconnected)
            srcDstPairResults3[(s, d)] = None
            print 's,d="%s","%s"' % (s, d)
            i += 1
            pass # end while i < numberOfPairsToTry
        pass # end if numberOfPairsToTry > 0
    else:
        # all-pairs mode is not implemented in this script, so
        # numberOfPairsToTry must be positive; this assert always fails
        # if we reach this branch
        assert numberOfPairsToTry > 0
        pass

    ###########################
    # now that we have the pairs we want to eval, eval them

    for s, d in srcDstPairResults3.keys():
        #### use approach2
        pp, dp = approach2.getDg(g, s, d, weighted)
        if (not pp) or (not dp):
            print 'no path: s,d="%s","%s"' % (s, d)
            continue
        numPNodes = len(pp) - 1 # don't want to count the destination
        fractionWithDetour = len(dp) / float(numPNodes)
        srcDstPairResults3[(s, d)] = SrcDstPairResult3(
            fractionWithDetour)
        pass # end for loop

    enddateSecs = int(time.time())
    fileResult3 = FileResult3(filename, startdateSecs, enddateSecs,
                              srcDstPairResults3)
    return fileResult3
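# Illustrative arithmetic for fractionWithDetour (toy values, not from
# the original): a 5-node primary path has numPNodes = 4 candidate detour
# points (the destination is excluded), so 2 detour paths give 0.5.
_pp = ['a', 'b', 'c', 'd', 'e']
_dp = {'a': ['a', 'x', 'c'], 'c': ['c', 'y', 'e']}
assert len(_dp) / float(len(_pp) - 1) == 0.5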
def evalOneFile(filename, numberOfPairsToTry, pairIsOrdered=False,
                weighted=False):
    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False, ignoreWeights=False)
    allNodes = g.nodes()
    numNodes = len(allNodes)
    i = 0
    srcDstPairResults3 = {}
    if numberOfPairsToTry > 0:
        while i < numberOfPairsToTry:
            # this is crypto random integer
            idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            while idx2 == idx1:
                idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
                pass
            # yes, i do want to increment i here because maybe the graph
            # is too small, then the total number of possible pairs is too
            # small, and we will never be able to reach the
            # numberOfPairsToTry
            # i += 1
            s, d = allNodes[idx1], allNodes[idx2]
            if (s, d) in srcDstPairResults3:
                # definitely skip
                print 'pair (%s,%s) already encountered -> skip' % (s, d)
                continue
            elif (d, s) in srcDstPairResults3:
                # not seen (s,d) yet but seen (d,s), should skip or not?
                if not pairIsOrdered:
                    print 'pairs are not ordered, and (%s,%s) already encountered -> skip' % (d, s)
                    continue
                pass
            # do this so we know we have seen this (s, d) pair
            # init to None (which will mean disconnected)
            srcDstPairResults3[(s, d)] = None
            print 's,d="%s","%s"' % (s, d)
            i += 1
            pass # end while i < numberOfPairsToTry
        pass # end if numberOfPairsToTry > 0
    else:
        # numberOfPairsToTry is <= 0, so we do all (un-ordered)
        # pairs. the graph'd better be not too big.
        for i in range(numNodes - 1):
            for j in range(i + 1, numNodes):
                s = allNodes[i]
                d = allNodes[j]
                print 's,d="%s","%s"' % (s, d)
                # init to None (which will mean disconnected)
                srcDstPairResults3[(s, d)] = None
                pass
            pass
        pass

    ###########################
    # now that we have the pairs we want to eval, eval them

    def computeBounds(g, s, d, primaryPath, M, pathFunction):
        # M is largest weight divided by smallest weight
        numPnodes = len(primaryPath)
        numPedges = numPnodes - 1
        if weighted:
            lowerBound = (2 * numPedges) + 1
            pass
        else:
            if (numPedges % 2) == 0:
                # even
                lowerBound = 2.5 * numPedges
                pass
            else:
                # odd
                lowerBound = (2.5 * numPedges) + 0.5
                pass
            pass

        ### upperbound is more involved
        upperBound = None
        savedEdgesData = {}
        # remove edges in ppath
        for i in xrange(numPnodes - 1):
            savedEdgesData[(primaryPath[i], primaryPath[i + 1])] = \
                copy.copy(g.edge[primaryPath[i]][primaryPath[i + 1]])
            g.remove_edge(primaryPath[i], primaryPath[i + 1])
            pass
        primaryPathPrime = None
        try:
            primaryPathPrime = pathFunction(g, s, d)
            pass
        except nx.exception.NetworkXError, exc:
            # no path
            pass
        # add back the removed edges
        for i in xrange(numPnodes - 1):
            g.add_edge(primaryPath[i], primaryPath[i + 1],
                       attr_dict=savedEdgesData[(primaryPath[i],
                                                 primaryPath[i + 1])])
            pass
        if primaryPathPrime:
            numPPrimeEdges = len(primaryPathPrime) - 1
            upperBound = (numPedges *
                          ((M * numPedges) - M
                           + (2 * numPPrimeEdges) + 2)) / 2
            pass
        return lowerBound, upperBound
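# Illustrative check of the bound formulas in computeBounds() (made-up
# numbers): an unweighted graph (so M = 1 and, with an even number of
# primary edges, lowerBound = 2.5 * numPedges), a primary path with 4
# edges, and an alternate path with 6 edges once the primary edges are
# removed.
_numPedges = 4
_numPPrimeEdges = 6
_M = 1.0
_lowerBound = 2.5 * _numPedges
_upperBound = (_numPedges *
               ((_M * _numPedges) - _M + (2 * _numPPrimeEdges) + 2)) / 2
assert (_lowerBound, _upperBound) == (10.0, 34.0)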
def evalOneFile(filename, numberOfPairsToTry, pairIsOrdered=False,
                weighted=False):
    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False,
                           ignoreWeights=(not weighted))
    allNodes = g.nodes()
    numNodes = len(allNodes)
    i = 0
    srcDstPairResults3 = {}
    if numberOfPairsToTry > 0:
        while i < numberOfPairsToTry:
            # this is crypto random integer
            idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            while idx2 == idx1:
                idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
                pass
            s, d = allNodes[idx1], allNodes[idx2]
            if (s, d) in srcDstPairResults3:
                # definitely skip
                print 'pair (%s,%s) already encountered -> skip' % (s, d)
                continue
            elif (d, s) in srcDstPairResults3:
                # not seen (s,d) yet but seen (d,s), should skip or not?
                if not pairIsOrdered:
                    print 'pairs are not ordered, and (%s,%s) already encountered -> skip' % (d, s)
                    continue
                pass
            # do this so we know we have seen this (s, d) pair
            # init to None (which will mean disconnected)
            srcDstPairResults3[(s, d)] = None
            print 's,d="%s","%s"' % (s, d)
            i += 1
            pass # end while i < numberOfPairsToTry
        pass # end if numberOfPairsToTry > 0
    else:
        # numberOfPairsToTry is <= 0, so we do all (un-ordered)
        # pairs. the graph'd better be not too big.
        for i in range(numNodes - 1):
            for j in range(i + 1, numNodes):
                s = allNodes[i]
                d = allNodes[j]
                print 's,d="%s","%s"' % (s, d)
                # init to None (which will mean disconnected)
                srcDstPairResults3[(s, d)] = None
                pass
            pass
        pass

    ###########################
    # now that we have the pairs we want to eval, eval them

    for s, d in srcDstPairResults3.keys():
        #### use approach2
        hdrLens2Normal = {}
        dagsizeNormal = []
        hdrLens2Smaller = {}
        dagsizeSmaller = []
        pp, dp = approach2.getDg(g, s, d, weighted=weighted,
                                 everyOtherNodeUsePredecessor=False)
        if (not pp) or (not dp):
            print 'no path: s,d="%s","%s"' % (s, d)
            continue
        for hdrlenDict, dagsizeList, onlyLaterTwoThirds \
            in ((hdrLens2Normal, dagsizeNormal, False),
                (hdrLens2Smaller, dagsizeSmaller, True)):
            if onlyLaterTwoThirds:
                # remove from "dp" entries of the first 1/3 of pnodes
                # (note: this trims the shared dp in place; the normal
                # pass above has already been encoded by this point)
                pplen = len(pp)
                for pnode in pp[:int(float(pplen) / 3)]:
                    if pnode in dp:
                        del dp[pnode]
                        pass
                    pass
                pass
            dag, virtualDetourPaths = approach2.getDagWithVnodes(
                pp, dp, returnDetourPathsWithVNodes=True)
            dagsizeList.append(dag.number_of_edges())
            for offsetPtrAlignment in offsetPtrAlignments:
                hdrlenDict[offsetPtrAlignment], _ = approach2.getHeader2(
                    dag, pp, virtualDetourPaths, lli, False, s, d,
                    roundUpToMultipleBits=offsetPtrAlignment)
                pass
            pass
        if len(hdrLens2Smaller) == 0:
            assert len(hdrLens2Normal) == len(dagsizeSmaller) == \
                   len(dagsizeNormal) == 0
            pass
        else:
            srcDstPairResults3[(s, d)] = SrcDstPairResult3(
                hdrLens2Normal, dagsizeNormal[0],
                hdrLens2Smaller, dagsizeSmaller[0])
            pass
        pass # end for loop

    enddateSecs = int(time.time())
    fileResult3 = FileResult3(filename, startdateSecs, enddateSecs,
                              srcDstPairResults3)
    return fileResult3
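# Illustrative behavior of the onlyLaterTwoThirds trimming above
# (hypothetical node names): detour paths anchored at the first third of
# the primary path are dropped before the smaller DAG is built.
_pp = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5']
_dp = {'n0': ['n0', 'x', 'n2'], 'n3': ['n3', 'y', 'n5']}
for _pnode in _pp[:int(float(len(_pp)) / 3)]:  # the first 2 of 6 pnodes
    if _pnode in _dp:
        del _dp[_pnode]
        pass
    pass
assert _dp.keys() == ['n3']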
    weighted = True
    argvidx += 1
    pass
if weighted:
    shortestPathFunction = nx.dijkstra_path
    shortestPathLengthFunction = nx.dijkstra_path_length
    pass
else:
    shortestPathFunction = nx.shortest_path
    shortestPathLengthFunction = nx.shortest_path_length
    pass
outputdir = sys.argv[argvidx]
argvidx += 1
graphFile = sys.argv[argvidx]

import utils
g, _ = utils.textToG(graphFile, ignoreWeights=False, useInt=False)
allNodes = g.nodes()
numNodes = len(allNodes)
alreadySeenPairs = set()
DAGRouterNotifiesSrcResults = []
DAGDstNotifiesSrcResults = []
SafeGuardResults = []
pickleEvery = 10
for i in xrange(20):
    idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
    idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
    while idx2 == idx1:
        idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
        pass
    s, d = allNodes[idx1], allNodes[idx2]
def cmd_showPairsWithDagSizeSmallerThanLowerBound(argv):
    argvidx = 0
    cmdname = argv[argvidx]
    argvidx += 1
    assert 'cmd_' + cmdname == inspect.stack()[0][3]

    showDetails = False
    opts, args = getopt.getopt(argv[argvidx:], '',
                               ['showDetails', ])
    ## parse options
    for o, a in opts:
        if o == '--showDetails':
            showDetails = True
            pass
        pass

    dirpaths = args
    assert len(dirpaths) > 0

    curGraphFilePath = None
    for dirpath in dirpaths:
        filenames = os.listdir(dirpath)
        for filename in filenames:
            filepath = dirpath + '/' + filename
            pr = utils.unpickleStuff(filepath)
            if showDetails and (pr.filename != curGraphFilePath):
                g, _ = utils.textToG(pr.filename, useInt=False,
                                     ignoreWeights=not pr.weighted)
                # remember which graph is loaded so it is not re-read
                # for every partial-result file
                curGraphFilePath = pr.filename
                # calculate M for computeBounds()
                if pr.weighted:
                    weights = map(lambda (u, v, edgeData): edgeData['weight'],
                                  g.edges(data=True))
                    maxWeight = max(weights)
                    minWeight = min(weights)
                    assert minWeight > 0
                    M = float(maxWeight) / float(minWeight)
                    pass
                else:
                    M = float(1)
                    pass
                pass
            for stretch in stretches:
                for (s, d) in pr.pairsWithDagSizeSmallerThanLowerBound[stretch]:
                    if showDetails:
                        pp, dp = getDgWithStretch(g, s, d, pr.weighted,
                                                  stretch)
                        if dp is None:
                            dp = {}
                            pass
                        dag, virtualDetourPaths = approach2.getDagWithVnodes(
                            pp, dp, returnDetourPathsWithVNodes=True)
                        lowerBound = computeBounds(g, s, d, pp, M,
                                                   pr.weighted)
                        print 's,d=%s,%s; #OfEdges(pp)=%u, #OfEdges(dps)=%u, lowerBound=%u, dagSize=%u' % (
                            repr(s), repr(d), len(pp) - 1,
                            sum(map(lambda p: len(p) - 1, dp.values())),
                            lowerBound, dag.number_of_edges())
                        pass
                    else:
                        print 's,d=%s,%s' % (repr(s), repr(d))
                        pass
                    pass
                pass
            pass
        pass
    return
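# Hypothetical invocation (the directory name is made up): scan one
# result directory and print full details for each offending pair.
#
#   cmd_showPairsWithDagSizeSmallerThanLowerBound(
#       ['showPairsWithDagSizeSmallerThanLowerBound',
#        '--showDetails', 'results/rocketfuel3967'])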
                pass
            if dagSize < lowerBound:
                pairsWithDagSizeSmallerThanLowerBound[stretch].append((s, d))
                pass
            pass
        return
    #########

    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False, ignoreWeights=False)

    localLinkLabelLens = {}
    for node in lli.keys():
        if len(lli[node]) == 1:
            # special case, otherwise it would be length of zero.
            localLinkLabelLens[node] = 1
            pass
        else:
            localLinkLabelLens[node] = int(
                math.ceil(math.log(len(lli[node]), 2)))
            pass
        pass

    allNodes = tuple(g.nodes())
    numNodes = len(allNodes)
    i = 0
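# Worked example of the label-length rule above (illustrative): a node
# with a single incident link still gets 1 bit; otherwise the length is
# ceil(log2(number of links)).
def _labelLen(numLinks):
    if numLinks == 1:
        return 1
    return int(math.ceil(math.log(numLinks, 2)))

assert map(_labelLen, [1, 2, 3, 4, 5, 8, 9]) == [1, 1, 2, 2, 3, 3, 4]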
def evalOneFile(filename, numberOfPairsToTry, pairIsOrdered=False,
                weighted=False):
    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False)
    allNodes = g.nodes()
    numNodes = len(allNodes)
    i = 0
    srcDstPairResults3 = {}
    if numberOfPairsToTry > 0:
        while i < numberOfPairsToTry:
            # this is crypto random integer
            idx1 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
            while idx2 == idx1:
                idx2 = (struct.unpack('I', os.urandom(4))[0]) % numNodes
                pass
            s, d = allNodes[idx1], allNodes[idx2]
            if (s, d) in srcDstPairResults3:
                # definitely skip
                print 'pair (%s,%s) already encountered -> skip' % (s, d)
                continue
            elif (d, s) in srcDstPairResults3:
                # not seen (s,d) yet but seen (d,s), should skip or not?
                if not pairIsOrdered:
                    print 'pairs are not ordered, and (%s,%s) already encountered -> skip' % (d, s)
                    continue
                pass
            # do this so we know we have seen this (s, d) pair
            # init to None (which will mean disconnected)
            srcDstPairResults3[(s, d)] = None
            print 's,d="%s","%s"' % (s, d)
            i += 1
            pass # end while i < numberOfPairsToTry
        pass # end if numberOfPairsToTry > 0
    else:
        # numberOfPairsToTry is <= 0, so we do all (un-ordered)
        # pairs. the graph'd better be not too big.
        for i in range(numNodes - 1):
            for j in range(i + 1, numNodes):
                s = allNodes[i]
                d = allNodes[j]
                print 's,d="%s","%s"' % (s, d)
                # init to None (which will mean disconnected)
                srcDstPairResults3[(s, d)] = None
                pass
            pass
        pass

    ###########################
    # now that we have the pairs we want to eval, eval them

    for s, d in srcDstPairResults3.keys():
        #### use approach2
        pp, dp = approach2.getDg(g, s, d, weighted)
        if (not pp) or (not dp):
            print 'no path: s,d="%s","%s"' % (s, d)
            continue
        # how many nodes in the undirected graph?
        subnodes = set(pp)
        for path in dp.values():
            subnodes.update(path)
            pass
        dag = approach2.getDagWithVnodes(
            pp, dp, returnDetourPathsWithVNodes=False)
        diff = dag.number_of_nodes() - len(subnodes)
        percent = (float(diff) / len(subnodes)) * 100
        srcDstPairResults3[(s, d)] = SrcDstPairResult3(
            diff, percent)
        pass # end for loop

    enddateSecs = int(time.time())
    fileResult3 = FileResult3(filename, startdateSecs, enddateSecs,
                              srcDstPairResults3)
    return fileResult3
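# Illustrative arithmetic for the vnode overhead computed above (made-up
# counts): an undirected subgraph of 20 distinct nodes whose vnode DAG
# has 26 nodes carries diff = 6 extra virtual nodes, i.e. percent = 30.0.
_subnodeCount = 20
_dagNodeCount = 26
_diff = _dagNodeCount - _subnodeCount
_percent = (float(_diff) / _subnodeCount) * 100
assert (_diff, _percent) == (6, 30.0)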
def evalOneFile(filename, headerLengthThreshold, numberOfPairsToTry,
                pairIsOrdered, partialResultSize, outputDir, argv,
                weighted, seed=None, srcStartIdx=None, srcEndIdx=None,
                ):
    def addValueCount(countsDict, value):
        if value in countsDict:
            countsDict[value] += 1
            pass
        else:
            countsDict[value] = 1
            pass
        return
    #####

    def evalOnePair(g, lli, s, d, lowerBoundCounts,
                    dagSize_stretchToCounts,
                    pairsWithDagSizeSmallerThanLowerBound,
                    codec2HdrLen_stretchToCounts,
                    codec4HdrLen_stretchToCounts,
                    pairsWithLargeCodec4Encodings, M):
        lowerBound = None
        for stretch in stretches:
            pp, dp = getDgWithStretch(g, s, d, weighted, stretch)
            if (not pp):
                print 'no path: s,d="%s","%s"' % (s, d)
                return
            if dp == None:
                dp = {}
                pass
            dag, virtualDetourPaths = approach2.getDagWithVnodes(
                pp, dp, returnDetourPathsWithVNodes=True)
            codec2HdrLen = codec2.encode(
                dag, pp, virtualDetourPaths, lli, False, s, d,
                useLinkIdLenPrefix=False, roundUpToMultipleBits=8)[0]
            addValueCount(codec2HdrLen_stretchToCounts[stretch],
                          codec2HdrLen)
            codec4HdrLen = codec4.encode(
                pp, dp, lli, s, d, roundUpToMultipleBits=8)
            addValueCount(codec4HdrLen_stretchToCounts[stretch],
                          codec4HdrLen)
            dagSize = dag.number_of_edges()
            addValueCount(dagSize_stretchToCounts[stretch], dagSize)
            if codec4HdrLen > headerLengthThreshold:
                pairsWithLargeCodec4Encodings[stretch].append((s, d))
                pass
            if lowerBound is None:
                lowerBound = computeBounds(g, s, d, pp, M, weighted)
                addValueCount(lowerBoundCounts, lowerBound)
                pass
            if dagSize < lowerBound:
                pairsWithDagSizeSmallerThanLowerBound[stretch].append((s, d))
                pass
            pass
        return
    #########

    startdateSecs = int(time.time())
    print '''
_______________________________________________
filename: [%s]
start date: [%s]
''' % (filename, time.ctime(startdateSecs))
    g, lli = utils.textToG(filename, useInt=False, ignoreWeights=False)
    allNodes = tuple(g.nodes())
    numNodes = len(allNodes)
    i = 0

    assert pairIsOrdered, 'un-ordered currently not supported'
    srcDstPairs = set()
    assert (numberOfPairsToTry != None)

    if not seed:
        seed = int(time.time())
        pass
    randObj = random.Random(seed)

    if srcStartIdx is None:
        srcStartIdx = 0
        pass
    else:
        assert srcStartIdx >= 0
        pass
    if srcEndIdx is None:
        srcEndIdx = numNodes - 1
        pass
    else:
        assert srcEndIdx <= numNodes - 1
        pass
    assert srcStartIdx <= srcEndIdx

    # calculate M for computeBounds()
    if weighted:
        weights = map(lambda (u, v, edgeData): edgeData['weight'],
                      g.edges(data=True))
        maxWeight = max(weights)
        minWeight = min(weights)
        assert minWeight > 0
        M = float(maxWeight) / float(minWeight)
        pass
    else:
        M = float(1)
        pass

    lowerBoundCounts = {}
    pairsWithLargeCodec4Encodings = {}
    pairsWithDagSizeSmallerThanLowerBound = {}
    dagSize_stretchToCounts = {}
    codec2HdrLen_stretchToCounts = {}
    codec4HdrLen_stretchToCounts = {}
    for stretch in stretches:
        dagSize_stretchToCounts[stretch] = {}
        codec2HdrLen_stretchToCounts[stretch] = {}
        codec4HdrLen_stretchToCounts[stretch] = {}
        pairsWithLargeCodec4Encodings[stretch] = []
        pairsWithDagSizeSmallerThanLowerBound[stretch] = []
        pass

    if numberOfPairsToTry > 0:
        # must be <= the number of possible ordered pairs
        assert numberOfPairsToTry <= ((srcEndIdx - srcStartIdx + 1) *
                                      (numNodes - 1)), \
            '%u must be <= than %u' % (
                numberOfPairsToTry,
                ((srcEndIdx - srcStartIdx + 1) * (numNodes - 1)))
        numPairsProcessed = 0
        partialResultNum = 0
        i = 0
        startdateSecs = int(time.time())
        while i < numberOfPairsToTry:
            idx1 = randObj.randint(srcStartIdx, srcEndIdx) # inclusive
            idx2 = randObj.randint(0, numNodes - 1)
            while idx2 == idx1:
                idx2 = randObj.randint(0, numNodes - 1)
                pass
            s, d = allNodes[idx1], allNodes[idx2]
            if (s, d) in srcDstPairs:
                # definitely skip
                print 'pair (%s,%s) already encountered -> skip' % (s, d)
                continue
##            elif (d, s) in srcDstPairs:
##                # not seen (s,d) yet but seen (d,s), should skip or not?
##                if not pairIsOrdered:
##                    print 'pairs are not ordered, and (%s,%s) already encountered -> skip' % (d, s)
##                    continue
##                pass
            # do this so we know we have seen this (s, d) pair
            srcDstPairs.add((s, d))
            print 's,d="%s","%s"' % (s, d)
            evalOnePair(g=g, lli=lli, s=s, d=d,
                        lowerBoundCounts=lowerBoundCounts,
                        dagSize_stretchToCounts=dagSize_stretchToCounts,
                        pairsWithDagSizeSmallerThanLowerBound=pairsWithDagSizeSmallerThanLowerBound,
                        codec2HdrLen_stretchToCounts=codec2HdrLen_stretchToCounts,
                        codec4HdrLen_stretchToCounts=codec4HdrLen_stretchToCounts,
                        pairsWithLargeCodec4Encodings=pairsWithLargeCodec4Encodings,
                        M=M)
            numPairsProcessed += 1
            if 0 == (numPairsProcessed % partialResultSize):
                # pickle the partial result and re-init the vars
                enddateSecs = int(time.time())
                partialResultNum += 1
                pr = PartialResult(
                    filename, startdateSecs, enddateSecs,
                    codec2HdrLen_stretchToCounts=codec2HdrLen_stretchToCounts,
                    codec4HdrLen_stretchToCounts=codec4HdrLen_stretchToCounts,
                    dagSize_stretchToCounts=dagSize_stretchToCounts,
                    pairsWithLargeCodec4Encodings=pairsWithLargeCodec4Encodings,
                    lowerBoundCounts=lowerBoundCounts,
                    pairsWithDagSizeSmallerThanLowerBound=pairsWithDagSizeSmallerThanLowerBound,
                    argv=argv, seed=seed, weighted=weighted)
                pickleFilepath = '%s/partialResult_srcIdx_%u_%u_num_%u' % (
                    outputDir, srcStartIdx, srcEndIdx, partialResultNum)
                utils.pickleStuff(pickleFilepath, pr)
                # re-init
                startdateSecs = enddateSecs
                lowerBoundCounts = {}
                pairsWithLargeCodec4Encodings = {}
                pairsWithDagSizeSmallerThanLowerBound = {}
                dagSize_stretchToCounts = {}
                codec2HdrLen_stretchToCounts = {}
                codec4HdrLen_stretchToCounts = {}
                for stretch in stretches:
                    dagSize_stretchToCounts[stretch] = {}
                    codec2HdrLen_stretchToCounts[stretch] = {}
                    codec4HdrLen_stretchToCounts[stretch] = {}
                    pairsWithLargeCodec4Encodings[stretch] = []
                    pairsWithDagSizeSmallerThanLowerBound[stretch] = []
                    pass
                pass
            i += 1
            pass # end while i < numberOfPairsToTry
        pass # end if numberOfPairsToTry > 0
    else:
        # numberOfPairsToTry is <= 0, so we do all (ordered)
        # pairs. the graph'd better be not too big.
        numPairsProcessed = 0
        partialResultNum = 0
        startdateSecs = int(time.time())
        for i in xrange(srcStartIdx, srcEndIdx + 1):
            s = allNodes[i]
            for j in xrange(numNodes):
                if j == i:
                    continue
                d = allNodes[j]
                print 's,d="%s","%s"' % (s, d)
                evalOnePair(g=g, lli=lli, s=s, d=d,
                            lowerBoundCounts=lowerBoundCounts,
                            dagSize_stretchToCounts=dagSize_stretchToCounts,
                            pairsWithDagSizeSmallerThanLowerBound=pairsWithDagSizeSmallerThanLowerBound,
                            codec2HdrLen_stretchToCounts=codec2HdrLen_stretchToCounts,
                            codec4HdrLen_stretchToCounts=codec4HdrLen_stretchToCounts,
                            pairsWithLargeCodec4Encodings=pairsWithLargeCodec4Encodings,
                            M=M)
                numPairsProcessed += 1
                if 0 == (numPairsProcessed % partialResultSize):
                    # pickle the partial result and re-init the vars
                    enddateSecs = int(time.time())
                    partialResultNum += 1
                    pr = PartialResult(
                        filename, startdateSecs, enddateSecs,
                        codec2HdrLen_stretchToCounts=codec2HdrLen_stretchToCounts,
                        codec4HdrLen_stretchToCounts=codec4HdrLen_stretchToCounts,
                        dagSize_stretchToCounts=dagSize_stretchToCounts,
                        pairsWithLargeCodec4Encodings=pairsWithLargeCodec4Encodings,
                        lowerBoundCounts=lowerBoundCounts,
                        pairsWithDagSizeSmallerThanLowerBound=pairsWithDagSizeSmallerThanLowerBound,
                        argv=argv, seed=seed, weighted=weighted)
                    pickleFilepath = '%s/partialResult_srcIdx_%u_%u_num_%u' % (
                        outputDir, srcStartIdx, srcEndIdx, partialResultNum)
                    utils.pickleStuff(pickleFilepath, pr)
                    # re-init
                    startdateSecs = enddateSecs
                    lowerBoundCounts = {}
                    pairsWithLargeCodec4Encodings = {}
                    pairsWithDagSizeSmallerThanLowerBound = {}
                    dagSize_stretchToCounts = {}
                    codec2HdrLen_stretchToCounts = {}
                    codec4HdrLen_stretchToCounts = {}
                    for stretch in stretches:
                        dagSize_stretchToCounts[stretch] = {}
                        codec2HdrLen_stretchToCounts[stretch] = {}
                        codec4HdrLen_stretchToCounts[stretch] = {}
                        pairsWithLargeCodec4Encodings[stretch] = []
                        pairsWithDagSizeSmallerThanLowerBound[stretch] = []
                        pass
                    pass
                pass
            pass
        pass

    # the last bunch might not have reached partialResultSize
    if len(lowerBoundCounts) > 0:
        enddateSecs = int(time.time())
        partialResultNum += 1
        pr = PartialResult(
            filename, startdateSecs, enddateSecs,
            codec2HdrLen_stretchToCounts=codec2HdrLen_stretchToCounts,
            codec4HdrLen_stretchToCounts=codec4HdrLen_stretchToCounts,
            dagSize_stretchToCounts=dagSize_stretchToCounts,
            pairsWithLargeCodec4Encodings=pairsWithLargeCodec4Encodings,
            lowerBoundCounts=lowerBoundCounts,
            pairsWithDagSizeSmallerThanLowerBound=pairsWithDagSizeSmallerThanLowerBound,
            argv=argv, seed=seed, weighted=weighted)
        pickleFilepath = '%s/partialResult_srcIdx_%u_%u_num_%u' % (
            outputDir, srcStartIdx, srcEndIdx, partialResultNum)
        utils.pickleStuff(pickleFilepath, pr)
        pass
    return
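# Sketch (assumed, not from the original): because each PartialResult
# above records both seed and argv, the random (s, d) sequence of a run
# can be replayed later without the pickles. This mirrors the selection
# loop in evalOneFile, including the duplicate skipping.
import random

def replayPairSequence(allNodes, seed, srcStartIdx, srcEndIdx, count):
    randObj = random.Random(seed)
    seen = set()
    pairs = []
    while len(pairs) < count:
        idx1 = randObj.randint(srcStartIdx, srcEndIdx)  # inclusive
        idx2 = randObj.randint(0, len(allNodes) - 1)
        while idx2 == idx1:
            idx2 = randObj.randint(0, len(allNodes) - 1)
            pass
        s, d = allNodes[idx1], allNodes[idx2]
        if (s, d) in seen:
            continue
        seen.add((s, d))
        pairs.append((s, d))
        pass
    return pairs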