Example #1
def computeEvictionStats(dataFile):
    """Compute the average transaction throughput for one benchmark CSV and
    record it in the module-level accumulator keyed by a shortened file name."""
    colMap, csvData = datautil.getCSVData(dataFile)
    rpos = dataFile.rfind("/")
    pos = dataFile.find("/")
    print dataFile
    # Shorten the key: keep everything before the first "/" and drop the
    # first two characters of the base file name.
    dataFile = dataFile[0:pos] + dataFile[rpos + 3:]
    if len(csvData) == 0: return

    tp = []
    txns = []
    time = []
    if not dict.has_key(dataFile):
        dict[dataFile] = []

    for row in csvData:
        txn = float(row[colMap["TRANSACTIONS"]])
        t = float(row[colMap["ELAPSED"]])
        txns.append(txn)
        time.append(t)
        tp.append(txn / 5)  # TRANSACTIONS is presumably a 5-second count, giving txn/s

    dict[dataFile].append(np.mean(tp))

    print "  Average Throughput: %.2f txn/s" % np.mean(tp)
    print
Example #2
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    rpos = dataFile.rfind("/")
    pos = dataFile.find("/")
    print dataFile
    dataFile = dataFile[0:pos] + dataFile[rpos + 3:]
    if len(csvData) == 0: return

    tp = []
    txns = []
    time = []
    if not dict.has_key(dataFile):
        dict[dataFile] = []
    
    for row in csvData:
        txn = float(row[colMap["TRANSACTIONS"]])
        t = float(row[colMap["ELAPSED"]])
        txns.append(txn)
        time.append(t)
        tp.append(txn/5) 
    
    dict[dataFile].append(np.mean(tp))

    print "  Average Throughput: %.2f txn/s" % np.mean(tp)
    print
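Every variant here leans on a project-local datautil.getCSVData helper (plus module-level imports of numpy as np and a module-level accumulator dictionary) that is not shown. Judging from how the results are indexed, the helper returns a (colMap, rows) pair, where colMap maps each header name to its column position. A minimal, hypothetical stand-in along those lines, not the project's actual implementation, might be:

import csv

def getCSVData(path):
    # Hypothetical stand-in for datautil.getCSVData: returns (colMap, rows),
    # where colMap maps a header name to its column index.  Numeric-looking
    # fields are converted so the arithmetic in the examples above works.
    with open(path, "r") as fd:
        reader = csv.reader(fd)
        header = next(reader)
        colMap = {name: idx for idx, name in enumerate(header)}
        rows = []
        for raw in reader:
            row = []
            for value in raw:
                try:
                    row.append(float(value))
                except ValueError:
                    row.append(value)
            rows.append(row)
    return colMap, rows

With a helper of this shape, each example boils down to picking one or two columns per row and averaging them with np.mean.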
Example #3
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    pos = dataFile.rfind("/")
    dataFile = dataFile[pos + 3:]
    if len(csvData) == 0: return

    tp = []
    if not dict.has_key(dataFile):
        dict[dataFile] = []

    for row in csvData:
        tp.append(float(row[colMap["THROUGHPUT"]]))

    dict[dataFile].append(np.mean(tp))

    print dataFile
    print "  Average Throughput: %.2f ms" % np.mean(tp)
    print
Example #4
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    pos = dataFile.rfind("/")
    dataFile = dataFile[pos + 3:]
    if len(csvData) == 0: return

    tp = []
    if not dict.has_key(dataFile):
        dict[dataFile] = []
    
    for row in csvData:
        tp.append(float(row[colMap["THROUGHPUT"]]))
    
    dict[dataFile].append(np.mean(tp))

    print dataFile
    print "  Average Throughput: %.2f ms" % np.mean(tp)
    print
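A hedged sketch of how one of the variants above might be driven over a batch of result files follows; the glob pattern, directory layout, and summary loop are assumptions rather than part of the original script:

import glob
import numpy as np

dict = {}  # module-level accumulator used by computeEvictionStats, as in the examples

for dataFile in sorted(glob.glob("results/*/throughput.csv")):  # hypothetical layout
    computeEvictionStats(dataFile)

for name, means in sorted(dict.items()):
    print("%s: %.2f txn/s on average over %d runs" % (name, np.mean(means), len(means)))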
Example #5
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    if len(csvData) == 0: return

    allTimes = []
    allTuples = []
    allBlocks = []
    allBytes = []

    for row in csvData:
        allTimes.append(row[colMap["STOP"]] - row[colMap["START"]])
        allTuples.append(int(row[colMap["TUPLES_EVICTED"]]))
        allBlocks.append(int(row[colMap["BLOCKS_EVICTED"]]))  # BLOCKS_EVICTED column name is assumed
        allBytes.append(int(row[colMap["BYTES_EVICTED"]]))

    print dataFile
    print "  Average Time: %.2f ms" % np.mean(allTimes)
    print "  Average Tuples: %.2f" % np.mean(allTuples)
    print "  Average Blocks: %.2f" % np.mean(allBlocks)
    print "  Average Bytes: %.2f MB" % (np.mean(allBytes) / float(1024 * 1024))
    print
Example #6
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    if len(csvData) == 0: return
    
    allTimes = [ ]
    allTuples = [ ]
    allBlocks = [ ]
    allBytes = [ ]
    
    for row in csvData:
        allTimes.append(row[colMap["STOP"]] - row[colMap["START"]])
        allTuples.append(int(row[colMap["TUPLES_EVICTED"]]))
        allBlocks.append(int(row[colMap["BLOCKS_EVICTED"]]))  # BLOCKS_EVICTED column name is assumed
        allBytes.append(int(row[colMap["BYTES_EVICTED"]]))
    
    print dataFile
    print "  Average Time: %.2f ms" % np.mean(allTimes)
    print "  Average Tuples: %.2f" % np.mean(allTuples)
    print "  Average Blocks: %.2f" % np.mean(allBlocks)
    print "  Average Bytes: %.2f MB" % (np.mean(allBytes)/float(1024*1024))
    print
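As a quick check of the arithmetic in examples #5 and #6, the snippet below runs two made-up rows through the same column lookups; the column names come from the examples, while the values and the expected output are synthetic:

import numpy as np

colMap = {"START": 0, "STOP": 1, "TUPLES_EVICTED": 2, "BLOCKS_EVICTED": 3, "BYTES_EVICTED": 4}
csvData = [
    [0.0, 120.0, 1000, 10, 2 * 1024 * 1024],   # 120 ms eviction that wrote 2 MB
    [0.0, 180.0, 3000, 30, 6 * 1024 * 1024],   # 180 ms eviction that wrote 6 MB
]

allTimes = [row[colMap["STOP"]] - row[colMap["START"]] for row in csvData]
allBytes = [int(row[colMap["BYTES_EVICTED"]]) for row in csvData]

print("  Average Time: %.2f ms" % np.mean(allTimes))                          # 150.00 ms
print("  Average Bytes: %.2f MB" % (np.mean(allBytes) / float(1024 * 1024)))  # 4.00 MB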
Example #7
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    if len(csvData) == 0: return
    pos = dataFile.rfind("/")
    dataFile = dataFile[pos + 3:]
    if not dictR.has_key(dataFile):
        dictR[dataFile] = []
    if not dictW.has_key(dataFile):
        dictW[dataFile] = []
    
    # Only the last CSV row's values survive this loop, so a single reading
    # (in KB, via integer division) is recorded per data file below.
    for row in csvData:
        read = int(row[colMap["ANTICACHE_BYTES_READ"]]) / 1024
        write = int(row[colMap["ANTICACHE_BYTES_WRITTEN"]]) / 1024

    dictR[dataFile].append(read)
    dictW[dataFile].append(write)
    
    print dataFile
    print "read: %d" % read
    print "write: %d" % write 
    print
Example #8
def computeEvictionStats(dataFile):
    colMap, csvData = datautil.getCSVData(dataFile)
    if len(csvData) == 0: return
    pos = dataFile.rfind("/")
    dataFile = dataFile[pos + 3:]
    if not dictR.has_key(dataFile):
        dictR[dataFile] = []
    if not dictW.has_key(dataFile):
        dictW[dataFile] = []

    # Only the last CSV row's values survive this loop, so a single reading
    # (in KB, via integer division) is recorded per data file below.
    for row in csvData:
        read = int(row[colMap["ANTICACHE_BYTES_READ"]]) / 1024
        write = int(row[colMap["ANTICACHE_BYTES_WRITTEN"]]) / 1024

    dictR[dataFile].append(read)
    dictW[dataFile].append(write)

    print dataFile
    print "read: %d" % read
    print "write: %d" % write
    print
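Examples #7 and #8 only populate the module-level dictR/dictW accumulators. One plausible way to report those numbers afterwards is sketched below; the function name and the averaging over repeated trials are assumptions rather than part of the original script:

import numpy as np

dictR = {}  # dataFile -> list of KB read from the anti-cache, one entry per trial
dictW = {}  # dataFile -> list of KB written to the anti-cache

def summarizeAnticacheIO(dictR, dictW):
    # Print the mean anti-cache I/O per configuration, one line per data file.
    for dataFile in sorted(dictR):
        reads = dictR[dataFile]
        writes = dictW.get(dataFile, [])
        avgWrite = np.mean(writes) if writes else 0.0
        print("%s: read %.1f KB, write %.1f KB (%d trials)" %
              (dataFile, np.mean(reads), avgWrite, len(reads)))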
Example #9
        for mem in memorySizes:
            for read_pct in readPcts:
#                fig = createWorkloadSkewGraphs(benchmark, mem, read_pct,
#                                               hstoreData[benchmark],
#                                               mysqlData[benchmark],
#                                               memcachedData[benchmark])
                fig = createWorkloadSkewGraphs(benchmark, mem, read_pct,
                                               noAntiCache[benchmark],
                                               hstoreData[benchmark],
                                               hstoreDataApprox[benchmark],
                                               mysqlData[benchmark],
                                               memcachedData[benchmark])
                fileName = "skew-%s-%dx-%s.pdf" % (benchmark, mem, readLabels[read_pct])
                graphutil.saveGraph(fig, fileName, height=OPT_GRAPH_HEIGHT)
                #break
            ## FOR
            #break
        ## FOR
    ## FOR

        colMap, indexTreeData = datautil.getCSVData(OPT_DATA_INDEX_TREE)
        colMap, indexHashData = datautil.getCSVData(OPT_DATA_INDEX_HASH)
        fig = createIndexGraph(colMap, indexHashData, indexTreeData)
        graphutil.saveGraph(fig, "index.pdf")

## LRU graph
    colMap, hstoreNoAnticacheData = datautil.getCSVData(OPT_DATA_LRU_NONE)
    colMap, hstoreSingleListData = datautil.getCSVData(OPT_DATA_LRU_SINGLE)
    colMap, hstoreDoubleListData = datautil.getCSVData(OPT_DATA_LRU_DOUBLE)

    fig = createLinkedListGraph(colMap, hstoreNoAnticacheData, hstoreSingleListData, hstoreDoubleListData)
    graphutil.saveGraph(fig, "lru.pdf")

    colMap, hstoreConstructData = datautil.getCSVData(OPT_DATA_EVICT_CONSTRUCT)
    colMap, hstoreWriteData = datautil.getCSVData(OPT_DATA_EVICT_WRITE)
    colMap, hstoreFetchData = datautil.getCSVData(OPT_DATA_EVICT_FETCH)
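The driver fragments in examples #9 and #10 rely on project-local helpers (createWorkloadSkewGraphs, createIndexGraph, createLinkedListGraph, graphutil.saveGraph) whose bodies are not shown. For orientation only, a minimal, hypothetical saveGraph in the same spirit could be written with matplotlib as below; the height handling is an assumption based on the height=OPT_GRAPH_HEIGHT keyword seen above:

import matplotlib.pyplot as plt

def saveGraph(fig, fileName, height=None):
    # Hypothetical stand-in for graphutil.saveGraph: optionally resize the
    # figure, write it to disk, and close it so long loops do not leak memory.
    if height is not None:
        fig.set_size_inches(fig.get_figwidth(), height)
    fig.savefig(fileName, bbox_inches="tight")
    plt.close(fig)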
Example #10
                fig = createWorkloadSkewGraphs(benchmark, mem, read_pct,
                                               noAntiCache[benchmark],
                                               hstoreData[benchmark],
                                               hstoreDataApprox[benchmark],
                                               mysqlData[benchmark],
                                               memcachedData[benchmark])
                fileName = "skew-%s-%dx-%s.pdf" % (benchmark, mem,
                                                   readLabels[read_pct])
                graphutil.saveGraph(fig, fileName, height=OPT_GRAPH_HEIGHT)
                #break
            ## FOR
            #break
        ## FOR
    ## FOR

        colMap, indexTreeData = datautil.getCSVData(OPT_DATA_INDEX_TREE)
        colMap, indexHashData = datautil.getCSVData(OPT_DATA_INDEX_HASH)
        fig = createIndexGraph(colMap, indexHashData, indexTreeData)
        graphutil.saveGraph(fig, "index.pdf")

## LRU graph
    colMap, hstoreNoAnticacheData = datautil.getCSVData(OPT_DATA_LRU_NONE)
    colMap, hstoreSingleListData = datautil.getCSVData(OPT_DATA_LRU_SINGLE)
    colMap, hstoreDoubleListData = datautil.getCSVData(OPT_DATA_LRU_DOUBLE)

    fig = createLinkedListGraph(colMap, hstoreNoAnticacheData,
                                hstoreSingleListData, hstoreDoubleListData)
    graphutil.saveGraph(fig, "lru.pdf")

    colMap, hstoreConstructData = datautil.getCSVData(OPT_DATA_EVICT_CONSTRUCT)
    colMap, hstoreWriteData = datautil.getCSVData(OPT_DATA_EVICT_WRITE)