# Example 1
# 0
def sysbench(filePath, outfp):
    """Parse a sysbench log file into the common JSON table structure.

    Args:
        filePath: path to the sysbench log file.
        outfp: writable file object; receives the JSON-encoded result.

    Returns:
        list of per-case dicts (TOP/BOTTOM text plus parsed tables).
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        # BOTTOM is recorded unconditionally, whether or not a title matched.
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        titleGroup = re.search(r'\[([\S ]+)\]\n', case)
        if titleGroup is not None:
            caseDict[parser_log.TOP] = titleGroup.group(0)
        tables = []
        tableContent = {}
        centerTopGroup = re.search(r"(sysbench\s\d[\S ]+\n)", case)
        # Guard: truncated logs may lack the version banner; the original
        # crashed with AttributeError here.
        if centerTopGroup is not None:
            tableContent[parser_log.CENTER_TOP] = centerTopGroup.groups()[0]
        else:
            tableContent[parser_log.CENTER_TOP] = ''
        tableGroup = re.search(r"Threads started\!([\s\S]+)\/\d+\.\d+\n", case)
        if tableGroup is not None:
            tableGroupContent_temp = tableGroup.groups()[0].strip()
            # Drop the per-interval progress lines emitted by sysbench 0.5,
            # then strip the remaining '+' continuation markers.
            tableGroupContent = re.sub(r'\+?sysbench[\- ]?0.5[\S ]+\n', '',
                                       tableGroupContent_temp)
            tableGroupContent_temp2 = re.sub(r'\+', '', tableGroupContent)
            table = parser_log.parseTable(tableGroupContent_temp2, ":{1,}")
            tableContent[parser_log.I_TABLE] = table
        tables.append(tableContent)
        caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result
# Example 2
# 0
def coremark(filePath, outfp):
    """Parse a CoreMark log file into the common JSON table structure.

    Args:
        filePath: path to the CoreMark log file.
        outfp: writable file object; receives the JSON-encoded result.

    Returns:
        list of per-case dicts (TOP/BOTTOM text plus parsed tables).
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        titleGroup = re.search(r"\[test:([\s\S]+?)\]", case)
        if titleGroup is not None:
            caseDict[parser_log.TOP] = titleGroup.group(0)

        tables = []
        tableContent = {}
        centerTopGroup = re.search(
            r"log:[\s\S]*?\n([\s\S]+for coremark\.)?\n", case)
        # Guard: the banner may be missing from truncated logs; the original
        # crashed with AttributeError here.  The captured group is optional,
        # so it may legitimately be None even on a match (kept as-is).
        if centerTopGroup is not None:
            tableContent[parser_log.CENTER_TOP] = centerTopGroup.groups()[0]
        else:
            tableContent[parser_log.CENTER_TOP] = ''
        tableGroup = re.search(r"for coremark\.\n([\s\S]+)\nMemory location",
                               case)
        if tableGroup is not None:
            tableGroupContent = tableGroup.groups()[0].strip()
            table = parser_log.parseTable(tableGroupContent, ":{1,}")
            tableContent[parser_log.I_TABLE] = table
        tables.append(tableContent)
        caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result
# Example 3
# 0
def fio(filePath, outfp):
    """Parse an fio log file into the common JSON table structure.

    Args:
        filePath: path to the fio log file.
        outfp: writable file object; receives the JSON-encoded result.

    Returns:
        list of per-case dicts (TOP/BOTTOM text plus parsed tables).
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        # BOTTOM is recorded unconditionally, whether or not a title matched.
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        titleGroup = re.search(r'\[test:([\s\S]+)\.\.\.', case)
        if titleGroup is not None:
            caseDict[parser_log.TOP] = titleGroup.group(0)
        tables = []
        tableContent = {}
        # The fio version banner ends with a 20xx year on its own line.
        centerTopGroup = re.search(r"(fio\-[\s\S]+\s20\d\d\n)", case)
        # Guard: the banner may be missing; the original crashed with
        # AttributeError here.
        if centerTopGroup is not None:
            tableContent[parser_log.CENTER_TOP] = centerTopGroup.groups()[0]
        else:
            tableContent[parser_log.CENTER_TOP] = ''

        tableGroup = re.search(r"\s20\d\d\n([\s\S]+)\[status\]", case)
        if tableGroup is not None:
            tableGroupContent = tableGroup.groups()[0].strip()
            # The clat percentile dump is not tabular; strip it first.
            tableGroupContent_temp = re.sub(r"(clat percentiles[\s\S]+\]\n)",
                                            "", tableGroupContent)
            table = parser_log.parseTable(tableGroupContent_temp, ":{1,}")
            tableContent[parser_log.I_TABLE] = table
        tables.append(tableContent)
        caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result
def speccpu(filePath, outfp):
    """Parse a SPEC CPU2006 result log into the common JSON table structure.

    Args:
        filePath: path to the SPEC CPU result log.
        outfp: writable file object; receives the JSON-encoded result.

    Returns:
        list of per-case dicts (TOP/BOTTOM text plus parsed tables).
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        # BOTTOM is recorded unconditionally, whether or not a header matched.
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        # The Base/Peak column header of the SPEC result table.
        titleGroup = re.search(
            r'(\s+Base\s+Base\s+Base\s+Peak\s+Peak\s+Peak\nBenchmarks\s+Ref\.\s+Run Time\s+Ratio\s+Ref\.\s+Run Time\s+Ratio)',
            case)
        if titleGroup is not None:
            caseDict[parser_log.TOP] = titleGroup.group(0)
        tables = []
        tableContent = {}
        tableContent[parser_log.CENTER_TOP] = ''
        # Result rows sit between the "=======" rule and the SPECint summary.
        tableGroup = re.search(r'=======([\s\S]+)SPECint\(R\)_base2006', case)
        if tableGroup is not None:
            table = parser_log.parseTable(tableGroup.groups()[0], ":{1,}")
            tableContent[parser_log.I_TABLE] = table
        tables.append(tableContent)
        caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result
def _tinymembench_sections(text, section_pattern, tables, tableContent):
    """Append one table entry per section of *text* matched by section_pattern.

    Each section's fenced header (between ``====`` rules) becomes CENTER_TOP
    and its body becomes I_TABLE.  ``tableContent`` is deliberately shared by
    the caller across both passes: a section whose header/body regex fails to
    match keeps the previously stored values (original behaviour preserved).
    """
    for section in re.findall(section_pattern, text):
        header = re.search(r"={4,}\n[\s\S]+?={4,}", section)
        if header is not None:
            tableContent[parser_log.CENTER_TOP] = header.group(0)
        body = re.search(r"={4,}[\n\r]{2,}([\s\S]+?)[\n\r]{3,}", section)
        if body is not None:
            # '---' separators are decoration, not data.
            cleaned = re.sub("---", "", body.groups()[0])
            tableContent[parser_log.I_TABLE] = parser_log.parseTable(
                cleaned, ":")
        tables.append(copy.deepcopy(tableContent))


def tinymembench(filePath, outfp):
    """Parse a tinymembench log into the common JSON table structure.

    Args:
        filePath: path to the tinymembench log file.
        outfp: writable file object; receives the JSON-encoded result.

    Returns:
        list of per-case dicts (TOP/BOTTOM text plus parsed tables).
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        titleGroup = re.search(r"\[test:([\s\S]+?)\]", case)
        if titleGroup is not None:
            caseDict[parser_log.TOP] = titleGroup.group(0)

        tables = []
        tableContent = {}

        # First pass: double-fenced ("====\n==...") sections.
        _tinymembench_sections(case, r"={4,}\n==[\s\S]+?[\n\r]{3,}",
                               tables, tableContent)

        # Second pass: remaining sections up to the [status] marker, with
        # the first-pass sections stripped out of the text.
        leftStr = re.sub(r"={4,}\n==[\s\S]+?[\n\r]{3,}", "", case)
        _tinymembench_sections(leftStr, r"={4,}\n[\s\S]+?\[status\]",
                               tables, tableContent)

        caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result
# Example 6
# 0
def cachebench(filePath, outfp):
    """Parse a cachebench log into the common JSON table structure.

    Writes the JSON-encoded result to outfp and returns the result list.
    """
    result = []
    for case in parser_log.parseData(filePath):
        entry = {parser_log.BOTTOM: parser_log.getBottom(case)}
        title = re.search("\[test:([\s\S]+?)\]", case)
        if title is not None:
            entry[parser_log.TOP] = title.group(0)

        # Everything between the "log:" line and the [status] marker is one
        # whitespace-separated table.
        section = {parser_log.CENTER_TOP: ''}
        body = re.search("log:[\s\S]*?\n([\s\S]+)\[status\]", case)
        if body is not None:
            section[parser_log.I_TABLE] = parser_log.parseTable(
                body.groups()[0].strip(), "\\s{1,}")

        entry[parser_log.TABLES] = [section]
        result.append(entry)
    outfp.write(json.dumps(result))
    return result
# Example 7
# 0
def lmbench(filePath, outfp):
    """Parse lmbench logs into the common JSON table structure.

    The [test:...] header names the lmbench variant ("lmbench lat",
    "lmbench bandwidth", "lmbench latency local_die_*", "lmbench bandwidth
    local_die_*"); each variant gets its own group of section regexes.
    Every matched section is appended to ``tables`` as a deep copy of the
    shared ``content_dict`` — NOTE(review): because the dict is reused, a
    section whose sub-pattern fails to match carries over the previous
    CENTER_TOP/I_TABLE values; presumably intentional, confirm.

    Writes the JSON-encoded result to outfp and returns the result list.
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        tables = []
        content_dict = {}
        titleGroup = re.search("\[test:([\s\S]+?)\]", case)
        if titleGroup != None:
            caseDict[parser_log.TOP] = titleGroup.group(0)
            # The tool name selects which section layout to expect below.
            tool = titleGroup.groups()[0].strip()
            # --- variant: latency measurements (lat_* binaries) ---
            if tool == "lmbench lat":
                # Syscall/pagefault latency block, with the [lmbench ...
                # [VERSION ...] banner as its header.
                tableGroup = re.search(
                    "Simple syscall([\s\S]+)lat_pagefault:[\s\S]+?\n", case)
                if tableGroup is not None:
                    centerTopGroup = re.search(
                        "\[lmbench([\s\S]+)\[VERSION([\s\S]+?)\]", case)
                    if centerTopGroup != None:
                        content_dict[
                            parser_log.CENTER_TOP] = centerTopGroup.group(0)
                    tableGroupContent = tableGroup.group(0)
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        tableGroupContent, ":")
                    tables.append(copy.deepcopy(content_dict))

                # "mappings" section: runs up to the File-system block.
                mappingGroup = re.search("\"mappings([\s\S]+?)\"File", case)
                if mappingGroup is not None:
                    content_dict[parser_log.CENTER_TOP] = "mappings"
                    mappingGroupContent = mappingGroup.groups()[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        mappingGroupContent, " ")
                    tables.append(copy.deepcopy(content_dict))

                fsLatencyGroup = re.search(
                    "\"File system latency([\s\S]+?)UDP latency", case)
                if fsLatencyGroup is not None:
                    content_dict[parser_log.CENTER_TOP] = "File system latency"
                    fsLatencyGroupContent = fsLatencyGroup.groups()[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        fsLatencyGroupContent, "\\s{1,}|\t")
                    tables.append(copy.deepcopy(content_dict))
                    # UDP latency
                udpLatencyGroup = re.search("UDP latency([\s\S]+?)[\n\r]{3,}",
                                            case)
                if udpLatencyGroup is not None:
                    content_dict[parser_log.CENTER_TOP] = ""
                    udpLatencyGroupContent = udpLatencyGroup.group(0)
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        udpLatencyGroupContent, ":")
                    tables.append(copy.deepcopy(content_dict))

                # One table per '"size=...' paragraph (context-switch data).
                sizeGroups = re.findall("\"(size=[\s\S]+?\n)([\s\S]+?)\\n{2,}",
                                        case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))

                # Memory load parallelism
                memoryLoadGroup = re.search(
                    "STREAM copy[\s\S]+STREAM2[\s\S]+?\n", case)
                if memoryLoadGroup is not None:
                    content_dict[
                        parser_log.CENTER_TOP] = "Memory load parallelism"
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        memoryLoadGroup.group(0), ":")
                    tables.append(copy.deepcopy(content_dict))
                # Memory load latency
                mlLatencyContentGroups = re.search(
                    "Memory load latency([\s\S]+?)[\n\r]{3,}", case)
                if mlLatencyContentGroups is not None:
                    mlLatencyContent = mlLatencyContentGroups.group(0)
                    # One sub-table per '"stride=...' paragraph; the first
                    # one keeps the section title in its header.
                    mlLatencyGroups = re.findall(
                        "\"(stride=[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}",
                        mlLatencyContent)
                    for index, mlLatencyGroup in enumerate(mlLatencyGroups):
                        if index == 0:
                            content_dict[
                                parser_log.
                                CENTER_TOP] = "Memory load latency\n" + mlLatencyGroup[
                                    0]
                        else:
                            content_dict[
                                parser_log.CENTER_TOP] = mlLatencyGroup[0]
                        content_dict[
                            parser_log.I_TABLE] = parser_log.parseTable(
                                mlLatencyGroup[1], " ")
                        tables.append(copy.deepcopy(content_dict))
                # Random load latency
                rlLatencyContentGroups = re.search(
                    "Random load latency([\s\S]+?)[\n\r]{3,}", case)
                if rlLatencyContentGroups is not None:
                    rlLatencyContent = rlLatencyContentGroups.group(0)
                    rlLatencyGroups = re.findall(
                        "\"(stride=[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}",
                        rlLatencyContent)
                    for index, rlLatencyGroup in enumerate(rlLatencyGroups):
                        if index == 0:
                            content_dict[
                                parser_log.
                                CENTER_TOP] = "Random load latency\n" + rlLatencyGroup[
                                    0]
                        else:
                            content_dict[
                                parser_log.CENTER_TOP] = rlLatencyGroup[0]
                        content_dict[
                            parser_log.I_TABLE] = parser_log.parseTable(
                                rlLatencyGroup[1], " ")
                        tables.append(copy.deepcopy(content_dict))
                caseDict[parser_log.TABLES] = tables
            # --- variant: bandwidth measurements (bw_* binaries) ---
            if tool == "lmbench bandwidth":
                tableGroup = re.search("File([\s\S]+)lat_pagefault:[\s\S]+?\n",
                                       case)
                if tableGroup is not None:
                    centerTopGroup = re.search(
                        "\[lmbench([\s\S]+)\[VERSION([\s\S]+?)\]", case)
                    if centerTopGroup != None:
                        content_dict[
                            parser_log.CENTER_TOP] = centerTopGroup.group(0)
                    tableGroupContent = tableGroup.group(0)
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        tableGroupContent, ":")
                    tables.append(copy.deepcopy(content_dict))
                mappingGroup = re.search("\"mappings([\s\S]+?)\"File", case)
                if mappingGroup is not None:
                    content_dict[parser_log.CENTER_TOP] = "mappings"
                    mappingGroupContent = mappingGroup.groups()[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        mappingGroupContent, " ")
                    tables.append(copy.deepcopy(content_dict))

                fsLatencyGroup = re.search(
                    "\"File system latency([\s\S]+?)[\n\r]{2,}", case)
                if fsLatencyGroup is not None:
                    content_dict[parser_log.CENTER_TOP] = "File system latency"
                    fsLatencyGroupContent = fsLatencyGroup.groups()[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        fsLatencyGroupContent, "\\s{1,}|\t")
                    tables.append(copy.deepcopy(content_dict))
                # UDP latency
                udpLatencyGroup = re.search("UDP latency([\s\S]+?)[\n\r]{2,}",
                                            case)
                if udpLatencyGroup is not None:
                    content_dict[parser_log.CENTER_TOP] = ""
                    udpLatencyGroupContent = udpLatencyGroup.group(0)
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        udpLatencyGroupContent, ":")
                    tables.append(copy.deepcopy(content_dict))

                # Socket bandwidth using localhost
                sbGroups = re.search(
                    "Socket bandwidth using localhost([\s\S]+?)[\n\r]{2,}",
                    case)
                if sbGroups is not None:
                    sbGroupContent = sbGroups.groups()[0]
                    # NOTE(review): "11111" looks like a leftover placeholder
                    # header string — confirm intended output.
                    content_dict[parser_log.CENTER_TOP] = "11111"
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sbGroupContent, "\\s{1,}", 1)
                    tables.append(copy.deepcopy(content_dict))
                # Avg xfer
                sbGroups = re.search("Avg xfer([\s\S]+?)[\n\r]{2,}", case)
                if sbGroups is not None:
                    sbGroupContent = sbGroups.group(0)
                    content_dict[parser_log.CENTER_TOP] = ""
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sbGroupContent, ":")
                    tables.append(copy.deepcopy(content_dict))

                # One table per quoted paragraph for each bandwidth kind:
                # read, Mmap, libc, Memory, unrolled.
                sizeGroups = re.findall(
                    "\"(read[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))

                sizeGroups = re.findall(
                    "\"(Mmap[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))

                sizeGroups = re.findall(
                    "\"(libc[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))

                sizeGroups = re.findall(
                    "(Memory[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))

                sizeGroups = re.findall(
                    "\"(unrolled[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))
                caseDict[parser_log.TABLES] = tables
            # --- variant: per-core local-die latency runs ---
            if tool == "lmbench latency local_die_1_core":
                groups = re.search(
                    "Local memory[\s\S]+\"stride=([\s\S]+?)[\n]", case)
                if groups is not None:
                    content_dict[parser_log.CENTER_TOP] = groups.group(0)
                sizeGroups = re.findall(
                    "\"(stride[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))
                caseDict[parser_log.TABLES] = tables

            if tool == "lmbench latency local_die_4_core":
                groups = re.search(
                    "Local memory[\s\S]+\"stride=([\s\S]+?)[\n]", case)
                if groups is not None:
                    content_dict[parser_log.CENTER_TOP] = groups.group(0)
                sizeGroups = re.findall(
                    "\"(stride[\s\S]+?\n)([\s\S]+?)[\n\r]{2,}", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))
                caseDict[parser_log.TABLES] = tables

            # --- variant: per-core local-die bandwidth runs ---
            if tool == "lmbench bandwidth local_die_1_core" or tool == "lmbench bandwidth local_die_4_core" \
                    or tool == "lmbench bandwidth local_die_16_core" or tool == "lmbench bandwidth local_die_32_core" \
                    or tool == "lmbench bandwidth local_die_64_core":
                sizeGroups = re.findall(
                    "\"([\s\S]+?\\nCommand[\s\S]+?\\n)([\s\S]+?\\n)", case)
                for sizeGroup in sizeGroups:
                    content_dict[parser_log.CENTER_TOP] = sizeGroup[0]
                    content_dict[parser_log.I_TABLE] = parser_log.parseTable(
                        sizeGroup[1], " ")
                    tables.append(copy.deepcopy(content_dict))
                caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result
# Example 8
# 0
def _openssl_speed_table(header_line, body_text, row_label_pattern):
    """Build a 2-D table (list of rows) from an `openssl speed` section.

    Args:
        header_line: whitespace-separated column titles; a leading empty
            cell is prepended to align with the row-label column.
        body_text: the data lines of the section.
        row_label_pattern: regex extracting the first cell of a row
            (e.g. "rsa 2048 bits" or "ecdh (nistp256)").

    Returns:
        list of rows (each a list of strings), header row first.
    """
    header = [""]
    header.extend(t for t in re.split("\\s{1,}", header_line) if t.strip())
    table = [header]
    for line in body_text.splitlines():
        label_match = re.search(row_label_pattern, line)
        # Skip lines without a recognizable row label; the original crashed
        # with AttributeError on such lines.
        if label_match is None:
            continue
        label = label_match.group(0)
        row = [label]
        row.extend(c for c in re.split("\\s{1,}", line.replace(label, ""))
                   if c.strip())
        table.append(row)
    return table


def openssl(filePath, outfp):
    """Parse an `openssl speed` log into the common JSON table structure.

    Args:
        filePath: path to the openssl speed log file.
        outfp: writable file object; receives the JSON-encoded result.

    Returns:
        list of per-case dicts.  (The original returned the JSON string;
        changed to return the list for consistency with every other parser
        in this module.)
    """
    cases = parser_log.parseData(filePath)
    result = []
    for case in cases:
        caseDict = {}
        caseDict[parser_log.BOTTOM] = parser_log.getBottom(case)
        titleGroup = re.search(r"\[test:([\s\S]+?)\]", case)
        if titleGroup is not None:
            caseDict[parser_log.TOP] = titleGroup.group(0)

        tables = []
        tableContent = {}
        # Symmetric-cipher throughput block, introduced by the
        # "The 'numbers' ..." banner and ending in a ...k column.
        tc = re.search(r"(The 'numbers'[\s\S]+?\n)([\s\S]+k\n)", case)
        if tc is not None:
            tableContent[parser_log.CENTER_TOP] = tc.groups()[0]
            tableContent[parser_log.TABLE] = parser_log.parseTable(
                tc.groups()[1], "\\s{2,}")
            tables.append(copy.deepcopy(tableContent))

        # sign/verify sections (RSA, DSA, ECDSA): rows start with
        # "... bits" or a parenthesized curve name.
        for group in re.findall(
                "(sign\\s{1,}verify\\s{1,}sign/s\\s{1,}verify/s\n)([\s\S]+?\n)\\s{5,}",
                case):
            tableContent[parser_log.CENTER_TOP] = ""
            tableContent[parser_log.TABLE] = _openssl_speed_table(
                group[0], group[1], "[\s\S]+?bits|[\s\S]+?\)")
            tables.append(copy.deepcopy(tableContent))

        # op / op-per-second sections (ECDH): rows end with a ")" label.
        for op_group in re.findall("(op\\s{1,}op/s\n)([\s\S]+?\n)\[status\]",
                                   case):
            tableContent[parser_log.CENTER_TOP] = ""
            tableContent[parser_log.TABLE] = _openssl_speed_table(
                op_group[0], op_group[1], "[\s\S]+?\)")
            tables.append(copy.deepcopy(tableContent))
        caseDict[parser_log.TABLES] = tables
        result.append(caseDict)
    outfp.write(json.dumps(result))
    return result