Example #1
def fixup_css(report_name):
    global need_fixup
    # Needed for VC19 and VC19 SP1.
    # From VC19 SP2 onwards a new option VCAST_RPTS_SELF_CONTAINED is used instead

    if not need_fixup:
        return

    with open(report_name, "r") as fd:
        data = fd.read()

    #fix up inline CSS because of Content Security Policy violation
    newData = data[:data.index("<style>") - 1] + """
    <link rel="stylesheet" href="vector_style.css">
    """ + data[data.index("</style>") + 8:]

    #fix up style directive because of Content Security Policy violation
    newData = newData.replace(
        "<div class='event bs-callout' style=\"position: relative\">",
        "<div class='event bs-callout relative'>")

    #fixup the inline VectorCAST image because of Content Security Policy violation
    regex_str = r"<img alt=\"Vector\".*"
    newData = re.sub(regex_str, "<img alt=\"Vector\" src=\"vectorcast.png\"/>",
                     newData)

    with open(report_name, "w") as fd:
        fd.write(newData)

    vc_scripts = os.path.join(os.getenv("WORKSPACE"), "vc_scripts")

    shutil.copy(os.path.join(vc_scripts, "vector_style.css"),
                "management/vector_style.css")
    shutil.copy(os.path.join(vc_scripts, "vectorcast.png"),
                "management/vectorcast.png")
Example #2
def fixup_2020_reports(report_name):
    with open(report_name, "r") as fd:
        try:
            main_soup = BeautifulSoup(fd, features="lxml")
        except:
            main_soup = BeautifulSoup(fd)

    main_soup = fixup_2020_soup(main_soup)

    with open(report_name, "w") as fd:
        fd.write(main_soup.prettify(formatter="html"))
Example #3
def runCsv2JenkinsCoverageResults(csvFilename):

    #read in the CSV file
    dataArray = readCsvFile(csvFilename)

    #parse the title to determine the coverage info
    titles = dataArray[0]
    determineCoverage(titles)

    #open the emma format file
    emmaData = ""

    #write out the header information for emma format
    emmaData += writeEmmaHeader()

    #write out the stat summary
    emmaData += writeEmmaStatSummary(dataArray[1:])

    #write out the data for the emma file
    emmaData += writeEmmaData(dataArray[1:])

    #write out the footer information for emma format
    emmaData += writeEmmaFooter()

    with open(csvFilename[:-4] + ".xml", "w") as fd:
        fd.write(emmaData)
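A call sketch; readCsvFile, determineCoverage, and the writeEmma* helpers come from the surrounding script, and the CSV name is hypothetical. The output name simply swaps the .csv extension for .xml:

runCsv2JenkinsCoverageResults("coverage_results_ENV_A_GNU_TestSuite.csv")
# writes coverage_results_ENV_A_GNU_TestSuite.xml in EMMA format; if readCsvFile is the
# variant shown in Example #9, the input CSV is also deleted after being read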
Example #4
def useNewAPI(FullManageProjectName, manageEnvs, level, envName, cbtDict):
    failed_count = 0

    for currentEnv in manageEnvs:
        if envName == None:
            failed_count += genDataApiReports(FullManageProjectName,
                                              manageEnvs[currentEnv], cbtDict)
            generateIndividualReports(manageEnvs[currentEnv], envName)

        elif manageEnvs[currentEnv]["env"].upper() == envName.upper():
            env_level = manageEnvs[currentEnv]["compiler"] + "/" + manageEnvs[
                currentEnv]["testsuite"]

            if env_level.upper() == level.upper():
                failed_count += genDataApiReports(FullManageProjectName,
                                                  manageEnvs[currentEnv],
                                                  cbtDict)
                generateIndividualReports(manageEnvs[currentEnv], envName)

    with open("unit_test_fail_count.txt", "w") as fd:
        failed_str = str(failed_count)
        try:
            fd.write(unicode(failed_str))
        except:
            fd.write(failed_str)
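The loop above only reads a few keys from each manageEnvs entry; a hypothetical call, assuming genDataApiReports and generateIndividualReports from the surrounding script, might look like this:

manageEnvs = {
    "GNU_Native/TestSuite/ENV_A": {   # key format is illustrative only
        "env": "ENV_A",
        "compiler": "GNU_Native",
        "testsuite": "TestSuite",
    },
}
useNewAPI("Project.vcm", manageEnvs, "GNU_Native/TestSuite", None, None)
# envName=None reports every environment; the failure total lands in unit_test_fail_count.txt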
Example #5
def runCombinedCov(HtmlReportName=""):

    TestResultsName = None
    CoverageResultsName = None

    # verify the html report exists
    if not os.path.isfile(HtmlReportName):
        raise IOError(HtmlReportName + ' does not exist')

    # open the file and create BS4 object
    with open(HtmlReportName, "r") as fd:
        html_doc = fd.read()

    soup = BeautifulSoup(html_doc, 'html.parser')

    # find all tables and loop over
    tables = soup.findAll('table')

    # loop over all the tables in the TCMR
    for table in tables:
        # If the table doesn't have a <span> and <strong> tag -- continue
        try:
            span = table.find('span')
            title = span.find('strong')
        except:
            continue

        if str(title) == "None":
            continue
        # if the title contains Metrics in the title
        if re.search("Metrics", str(title)) is not None:
            #print "   Processing Coverage Results from: " + os.path.basename(HtmlReportName)
            procCombinedCoverageResults(HtmlReportName, table)
Example #6
    def __get_testcase_execution_results(self, tc, classname, tc_name):
        report_name_hash = '.'.join(["execution_results", classname, tc_name])
        # Unicode-objects must be encoded before hashing in Python 3
        if sys.version_info[0] >= 3:
            report_name_hash = report_name_hash.encode('utf-8')

        report_name = hashlib.md5(report_name_hash).hexdigest()

        try:
            self.api.report(testcases=[tc],
                            single_testcase=True,
                            report_type="Demo",
                            formats=["TEXT"],
                            output_file=report_name,
                            sections=["TESTCASE_SECTIONS"],
                            testcase_sections=["EXECUTION_RESULTS"])

            with open(report_name, "r") as fd:
                out = fd.read()

            os.remove(report_name)
        except:
            out = "No execution results found"

        return out
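The temporary report file name is just the MD5 digest of "execution_results.<classname>.<tc_name>"; a worked example of the hashing step (names hypothetical):

import hashlib
import sys

name = '.'.join(["execution_results", "GNU_Native.TestSuite.ENV_A.manager", "test_place_order"])
if sys.version_info[0] >= 3:
    name = name.encode('utf-8')
print(hashlib.md5(name).hexdigest())  # 32-character hex string used as the report file name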
Example #7
def writeBlankCCFile():
    with open("coverage_results_blank.xml", "w") as fd:
        fd.write("""<report>
  <version value="3"/>
<data>
<all name="environments">
<coverage type="complexity, %" value="0% (0 / 0)"/>
</all>
</data>
</report>""")
    print("Generating a blank coverage report\n")
Example #8
def getCompilerfromConfig(config_file):
    compiler = "Unknown_Compiler"

    with open(config_file, 'r') as fd:
        lines = fd.readlines()

    compiler = "Compiler_Not_Found"
    for line in lines:
        match = re.match(r'^C_COMPILER_HIERARCHY_STRING: (.*)$', line)
        if match is not None:
            compiler = match.group(1)
    return str(compiler)
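A small round-trip sketch; the C_COMPILER_HIERARCHY_STRING key comes from the regex above, while the file name and value are hypothetical:

with open("CCAST_.CFG", "w") as cfg:        # hypothetical VectorCAST configuration file
    cfg.write("C_COMPILER_HIERARCHY_STRING: GNU Native_5.4_C\n")

print(getCompilerfromConfig("CCAST_.CFG"))  # -> GNU Native_5.4_C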
Example #9
def readCsvFile(csvFilename):
    global fullManageProject
    global manageProject
    global compiler
    global jobName
    global level
    global envName
    global jobNamePrefix
    global jobNameDotted

    #mode = 'r' if sys.version_info[0] >= 3 else 'rb'
    with open(csvFilename, "r") as fd:
        csvList = fd.readlines()
    os.remove(csvFilename)

    fullManageProject = csvList[0].split(",")[1].rstrip()
    (manageProject,
     ext) = os.path.splitext(os.path.basename(fullManageProject))
    envName = csvList[1].split(",")[1].rstrip()
    level = csvList[2].split(",")[1].rstrip().split('/')
    htmlFilename = csvList[3].split(",")[1].rstrip()
    if len(level) == 2:
        # Level does not include source and platform
        jobNamePrefix = '_'.join([level[0], level[1], envName])
        jobName = level[0] + "_" + level[1].rstrip()
        compiler = level[0]
    else:
        # Level includes source and platform
        jobNamePrefix = '_'.join([level[2], level[3], envName])
        jobName = level[2] + "_" + level[3].rstrip()
        compiler = level[2]

    envName = envName.replace('.', '_')

    level[0] = level[0].replace('.', '_')
    level[1] = level[1].replace('.', '_')
    if len(level) == 2:
        # Level does not include source and platform
        jobNameDotted = '.'.join([level[0], level[1], envName])
    else:
        # Level includes source and platform
        level[2] = level[2].replace('.', '_')
        level[3] = level[3].replace('.', '_')
        jobNameDotted = '.'.join([level[2], level[3], envName])

    dataArray = []
    for row in csvList[4:]:
        if row:
            data = row.strip().split(',')
            dataArray.append(data)

    return dataArray
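The first four rows carry project metadata and the fifth row the column titles; a minimal input sketch (all values hypothetical; note the function deletes the CSV after reading it):

csv_text = (
    "Project,/work/Project.vcm\n"
    "Environment,ENV_A\n"
    "Level,GNU_Native/TestSuite\n"
    "HtmlFilename,ENV_A_management_report.html\n"
    "Unit,Subprogram,Test Case,Status\n"
)
with open("test_results_sketch.csv", "w") as fd:
    fd.write(csv_text)

dataArray = readCsvFile("test_results_sketch.csv")  # dataArray[0] is the title row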
Example #10
def readManageVersion(ManageFile):
    version = 14
    if os.path.isfile(ManageFile + ".vcm"):
        ManageFile = ManageFile + '.vcm'
    with open(ManageFile, 'r') as projFile:
        for line in projFile:
            if 'version' in line and 'project' in line:
                version = int(re.findall(r'\d+', line)[0])
                break
    if verbose:
        print("Version of Manage project file = %d" % version)
        print("(Levels change in version 17 (*maybe) and above)")
    return version
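The parser just looks for a line containing both "version" and "project" and takes the first integer on it; a worked sketch (the file contents are illustrative, not the real .vcm schema):

verbose = False                        # module-level flag used by the function
with open("Sketch.vcm", "w") as fd:
    fd.write('<project version="17">\n')

print(readManageVersion("Sketch"))     # the function appends .vcm itself -> 17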
Example #11
def fixup_css(report_name):
    # Needed for VC19 and VC19 SP1.
    # From VC19 SP2 onwards a new option VCAST_RPTS_SELF_CONTAINED is used instead

    with open(report_name, "r") as fd:
        data = fd.read()

    # When using the new option, there will be an <img src="vector_logo.png"/> in the
    # generated HTML. If present, there is no need to do anything else.
    if '"vector_logo.png"' in data:
        return

    #fix up inline CSS because of Content Security Policy violation
    newData = data[:data.index("<style>") - 1] + """
    <link rel="stylesheet" href="normalize.css">
    <link rel="stylesheet" href="default-style.css">
    """ + data[data.index("</style>") + 8:]

    #fix up style directive because of Content Security Policy violation
    newData = newData.replace(
        "<div class='event bs-callout' style=\"position: relative\">",
        "<div class='event bs-callout relative'>")

    #fixup the inline VectorCAST image because of Content Security Policy violation
    regex_str = r"<img alt=\"Vector\".*"
    newData = re.sub(regex_str, "<img alt=\"Vector\" src=\"vectorcast.png\"/>",
                     newData)

    with open(report_name, "w") as fd:
        fd.write(newData)

    vc_scripts = os.path.join(os.getenv("WORKSPACE"), "vc_scripts")

    shutil.copy(os.path.join(vc_scripts, "normalize.css"),
                "management/normalize.css")
    shutil.copy(os.path.join(vc_scripts, "default-style.css"),
                "management/default-style.css")
    shutil.copy(os.path.join(vc_scripts, "vectorcast.png"),
                "management/vectorcast.png")
Example #12
def processTotals(complexityIndex, columnTitles, info):
    # process each of the fields in the <td> tag
    dataStr = ""
    idx = 0
    myStr = "  <combined-coverage type=\"%s, %%\" value=\"%s (%s / %s)\"/>\n"

    for item in info:
        # if we haven't passed complexity yet...
        if idx == complexityIndex:
            dataStr += myStr % (columnTitles[0], "0%", item, "0")
        elif idx > complexityIndex:
            if item != BLANK:
                #split the data into covered, total, percent
                if "(" in item:
                    covered, na, total, percent = item.split()

                    # remove the () from around the percent field
                    percent = str(re.sub("[()]", "", percent))
                elif item == "100%":
                    percent = item
                    covered = "1"
                    total = "1"
                else:
                    percent = "0"
                    covered = "0"
                    total = "1"

                dataStr += myStr % (columnTitles[idx - complexityIndex],
                                    percent, covered, total)

        idx += 1
    if not os.path.exists("xml_data"):
        os.mkdir("xml_data")
    xml_file = os.path.join("xml_data", "coverage_results_top-level.xml")

    with open(xml_file, "w") as fd:
        time_tuple = time.localtime()
        date_string = time.strftime("%m/%d/%Y", time_tuple)
        time_string = time.strftime("%I:%M %p", time_tuple)
        datetime_str = date_string + "\t" + time_string
        fd.write("<!-- VectorCAST/Jenkins Integration, Generated " +
                 datetime_str + " -->\n")
        fd.write("<report>\n")
        fd.write("  <version value=\"3\"/>\n")
        fd.write(dataStr)
        fd.write("</report>\n\n")
Example #13
def run(HtmlReportName="", jobName="", version=14):
    global manageVersion

    TestResultsName = None
    CoverageResultsName = None
    manageVersion = version

    # verify the html report exists
    if not os.path.isfile(HtmlReportName):
        raise IOError(HtmlReportName + ' does not exist')

    # open the file and create BS4 object
    with open(HtmlReportName, "r") as fd:
        html_doc = fd.read()
        soup = BeautifulSoup(html_doc, 'html.parser')

    # find all tables and loop over
    tables = soup.findAll('table')

    # loop over all the tables in the TCMR
    for table in tables:
        # If the table doesn't have a <span> and <strong> tag -- continue
        try:
            span = table.find('span')
            title = span.find('strong')
        except:
            continue

        # if the title contains Testcase*Management in the title
        if re.search("Testcase.*Management", str(title)) is not None:
            #print "   Processing Test Case Results from: " + os.path.basename(HtmlReportName)
            TestResultsName = procTestResults(HtmlReportName, table, jobName)

        # if the title contains Metrics in the title
        if re.search("Metrics", str(title)) is not None:
            #print "   Processing Coverage Results from: " + os.path.basename(HtmlReportName)
            CoverageResultsName = procCoverageResults(HtmlReportName, table,
                                                      jobName)

    return TestResultsName, CoverageResultsName
Example #14
def useNewAPI(FullManageProjectName, manageEnvs, level, envName, cbtDict,
              generate_exec_rpt_each_testcase, use_archive_extract,
              report_only_failures, no_full_report):

    failed_count = 0

    for currentEnv in manageEnvs:
        if skipReporting(manageEnvs[currentEnv]["build_dir"],
                         use_archive_extract, cbtDict):
            print("   No Change for " + currentEnv + ".  Skipping reporting.")
            continue

        if envName == None:
            failed_count += genDataApiReports(FullManageProjectName,
                                              manageEnvs[currentEnv], cbtDict,
                                              generate_exec_rpt_each_testcase,
                                              use_archive_extract,
                                              report_only_failures)
            if not no_full_report:
                generateIndividualReports(manageEnvs[currentEnv], envName)

        elif manageEnvs[currentEnv]["env"].upper() == envName.upper():
            env_level = manageEnvs[currentEnv]["compiler"] + "/" + manageEnvs[
                currentEnv]["testsuite"]

            if env_level.upper() == level.upper():
                failed_count += genDataApiReports(
                    FullManageProjectName, manageEnvs[currentEnv], cbtDict,
                    generate_exec_rpt_each_testcase, use_archive_extract,
                    report_only_failures)
                if not no_full_report:
                    generateIndividualReports(manageEnvs[currentEnv], envName)

    with open("unit_test_fail_count.txt", "w") as fd:
        failed_str = str(failed_count)
        try:
            fd.write(unicode(failed_str))
        except:
            fd.write(failed_str)
Example #15
def runCsv2JenkinsTestResults(csvFilename, junit):

    dataArray = readCsvFile(csvFilename)

    titles = dataArray[0]

    junitData = ""

    junitData += writeJunitHeader(dataArray[1:])

    for data in dataArray[1:]:
        data[UNIT_NAME_COL] = escape(data[UNIT_NAME_COL])
        data[SUBPROG_COL] = escape(data[SUBPROG_COL])
        data[TEST_CASE_COL] = escape(data[TEST_CASE_COL])
        junitData += writeJunitTestCase(
            data[UNIT_NAME_COL], data[SUBPROG_COL].replace("%2C", ","),
            data[TEST_CASE_COL].replace("%2C", ","), data[TC_STATUS_COL])

    junitData += writeJunitFooter()

    with open(csvFilename[:-4] + ".xml", "w") as fd:
        fd.write(junitData)
Example #16
def procTestResults(HtmlReportName, table, level):

    global manageProjectName

    #get the environment name by stripping the report suffix from the basename
    envName = os.path.basename(HtmlReportName)[:-23]

    #setup the filename
    CsvFileName = getCsvName(HtmlReportName, level, "test_results_")

    #write out additional info
    csv_file_data = ""
    csv_file_data += ("Project," + manageProjectName + "\n")
    csv_file_data += ("Environment," + envName + "\n")
    csv_file_data += ("Level," + "/".join(level).rstrip() + "\n")
    csv_file_data += ("HtmlFilename," + HtmlReportName + "\n")

    # setup BeautifulSoup processor for input table
    tableSoup = BeautifulSoup(table.encode('ascii'), 'html.parser')

    # Get Column Titles
    columnTitles = []

    # navigate to Table's 2nd <tr> tag then to the <tr> tag inside that
    # Input Table
    #   <tr>
    #   <tr>
    #       <tr>
    # and process the children which are <td> info

    dataTable = tableSoup.tr.next_sibling.tr
    for child in dataTable.children:
        columnTitles.append(child.string.encode('ascii', 'ignore'))

    # write the titles to the CSV file
    csv_file_data += (columnTitles[UNIT_NAME_COL] + "," +
                      columnTitles[SUBPROG_COL] + "," +
                      columnTitles[TEST_CASE_COL] + "," +
                      columnTitles[TC_STATUS_COL] + "\n")

    unitName = ""
    subpName = ""

    # navigate to Table's 2nd <tr> tag then to the <tr> tag inside that
    # Input Table
    #   <tr>
    #   <tr>
    #       <tr>
    #       <tr>
    # and process the children which are <td> info

    dataEntry = tableSoup.tr.next_sibling.tr.next_sibling

    while dataEntry is not None:

        # grab the info inside each of the <td> tags
        info = [
            child.string.encode('ascii',
                                'xmlcharrefreplace').replace(NPBS, BLANK)
            for child in dataEntry.children
        ]

        # go to the next <td>
        dataEntry = dataEntry.next_sibling

        # fix up any blank fields that result from the TCMR only printing the unit/subprogram once
        if info[UNIT_NAME_COL] == BLANK:
            info[UNIT_NAME_COL] = unitName
        elif info[UNIT_NAME_COL] != unitName:
            unitName = info[0]

        if info[SUBPROG_COL] == BLANK:
            info[SUBPROG_COL] = subpName
        elif info[SUBPROG_COL] != subpName:
            subpName = info[SUBPROG_COL]

        # fix up <<COMPOUND>>, and <<INIT>> only having a unit, no subprogram
        if info[UNIT_NAME_COL] == '<<COMPOUND>>':
            info[SUBPROG_COL] = '<<COMPOUND>>'
        if info[UNIT_NAME_COL] == '<<INIT>>':
            info[SUBPROG_COL] = '<<INIT>>'

        # skip totals
        if 'TOTALS' in info[UNIT_NAME_COL]:
            continue
        if info[TC_STATUS_COL] == BLANK:
            continue

        # take care of subprograms and test cases that have ',' in them, like constructors
        if "," in info[SUBPROG_COL]:
            info[SUBPROG_COL] = info[SUBPROG_COL].replace(",", "%2C")
        if "," in info[TEST_CASE_COL]:
            info[TEST_CASE_COL] = info[TEST_CASE_COL].replace(",", "%2C")

        # write data to the CSV file
        csv_file_data += (info[UNIT_NAME_COL] + "," + info[SUBPROG_COL] + "," +
                          info[TEST_CASE_COL] + "," + info[TC_STATUS_COL] +
                          "\n")

    with open(CsvFileName, "w") as fd:
        try:
            fd.write(unicode(csv_file_data, "utf-8"))
        except:
            fd.write(csv_file_data)

    return CsvFileName
Example #17
def procCoverageResults(HtmlReportName, table, level):
    global manageProjectName
    #get the environment name by stripping the report suffix from the basename
    envName = os.path.basename(HtmlReportName)[:-23]

    #setup the filename
    CsvFileName = getCsvName(HtmlReportName, level, "coverage_results_")

    #write out additional info
    csv_file_data = ""
    csv_file_data += ("Project," + manageProjectName + "\n")
    csv_file_data += ("Environment," + envName + "\n")
    csv_file_data += ("Level," + "/".join(level).rstrip() + "\n")
    csv_file_data += ("HtmlFilename," + HtmlReportName + "\n")

    # setup BeautifulSoup processor for input table
    tableSoup = BeautifulSoup(table.encode('ascii'), 'html.parser')

    # Get Column Titles
    columnTitles = []

    # navigate to Table's 3rd <tr> tag then to the <tr> tag inside that
    # Input Table
    #   <tr>
    #   <tr>
    #   <tr>
    #       <tr>
    # and process the children which are <td> info
    try:
        if tableSoup.tr.next_sibling.next_sibling is None:
            dataTable = tableSoup.tr.next_sibling.tr
        else:
            dataTable = tableSoup.tr.next_sibling.next_sibling.tr
    except AttributeError:

        print("No Coverage Found")
        if os.path.isfile(CsvFileName):
            os.remove(CsvFileName)
        return None

    titleStr = ""

    # remember the complexity column index so we can split the coverage info into multiple cells in CSV
    complexityIndex = -1
    idx = 0

    try:
        # process the <td> tags
        for child in dataTable.children:

            # if we haven't found the complexity yet...
            if complexityIndex == -1:
                # write out the information directly
                titleStr = titleStr + child.string.encode('ascii',
                                                          'ignore') + ","

                # check if this field is the complexity
                if "Complexity" in child.string:
                    complexityIndex = idx
            else:
                # otherwise write it out as Covered, Total, Percent
                str = child.string.encode('ascii', 'ignore')
                titleStr = titleStr + str + " Covered," + str + " Total," + str + " Percent,"

            idx += 1
    except AttributeError as e:
        print("Error with Test Case Management Report: " + HtmlReportName)
        if os.path.isfile(CsvFileName):
            os.remove(CsvFileName)
        return None

    # write out the title information except for the trailing comma
    csv_file_data += (titleStr[:-1] + "\n")

    # navigate to Table's 3rd <tr> tag then to the <tr> tag inside that
    # Input Table
    #   <tr>
    #   <tr>
    #   <tr>
    #       <tr>
    #       <tr>
    # and process the children which are <td> info
    if tableSoup.tr.next_sibling.next_sibling is None:
        dataEntry = tableSoup.tr.next_sibling.tr.next_sibling
    else:
        dataEntry = tableSoup.tr.next_sibling.next_sibling.tr.next_sibling
    unitName = ""

    # loop over the <td> tags
    while dataEntry is not None:

        # grab the info inside each of the <td> tags
        info = [
            child.string.encode('ascii',
                                'xmlcharrefreplace').replace(NPBS, BLANK)
            for child in dataEntry.children
        ]

        # move to next <td> tag
        dataEntry = dataEntry.next_sibling

        # skip TOTALS and blank lines
        if 'TOTALS' in info[UNIT_NAME_COL]:
            continue

        # fix up any blank fields that result from the TCMR only printing the unit once
        if info[UNIT_NAME_COL] == BLANK:
            info[UNIT_NAME_COL] = unitName
        elif info[UNIT_NAME_COL] != unitName:
            unitName = info[0]

        if info[SUBPROG_COL] == "    Analysis" or info[
                SUBPROG_COL] == "    Execution":
            continue

        # take care of subprograms and test cases that have ',' in them, like constructors
        if "," in info[SUBPROG_COL]:
            info[SUBPROG_COL] = info[SUBPROG_COL].replace(",", "%2C")

        # process each of the fields in the <td> tag
        dataStr = ""
        idx = 0

        for item in info:
            # if we haven't passed complexity yet...
            if idx <= complexityIndex:
                # save data normally
                dataStr = dataStr + item + ","
            else:
                # else
                if item != BLANK:
                    #split the data into covered, total, percent
                    if "(" in item:
                        covered, na, total, percent = item.split()

                        # remove the () from around the percent field
                        percent = re.sub("[()]", "", percent)
                    elif item == "100%":
                        percent = item
                        covered = "1"
                        total = "1"
                    else:
                        percent = "0"
                        covered = "0"
                        total = "1"

                    dataStr = dataStr + covered + "," + total + "," + percent + ","

                else:
                    # handle blank field
                    dataStr = dataStr + "," + "," + ","

            idx += 1

        if not ', , ,' in dataStr:
            # write data to CSV file
            csv_file_data += (dataStr[:-1] + "\n")

    with open(CsvFileName, "w") as fd:
        try:
            fd.write(unicode(csv_file_data, "utf-8"))
        except:
            fd.write(csv_file_data)
    return CsvFileName
Example #18
def buildReports(FullManageProjectName=None,
                 level=None,
                 envName=None,
                 generate_individual_reports=True,
                 timing=False,
                 cbtDict=None):

    if timing:
        print("Start: " + str(time.time()))

    saved_level = level
    saved_envName = envName

    # make sure the project exists
    if not os.path.isfile(FullManageProjectName) and not os.path.isfile(
            FullManageProjectName + ".vcm"):
        raise IOError(FullManageProjectName + ' does not exist')

    manageProjectName = os.path.splitext(
        os.path.basename(FullManageProjectName))[0]

    version = readManageVersion(FullManageProjectName)
    useNewReport = checkUseNewReportsAndAPI()
    manageEnvs = {}

    if timing:
        print("Version Check: " + str(time.time()))

    with tee_print.TeePrint() as teePrint:
        cleanupOldBuilds(teePrint)

    for file in glob.glob("*.csv"):
        try:
            os.remove(file)
            if verbose:
                print("Removing file: " + file)
        except Exception as e:
            teePrint.teePrint(
                "   *INFO: File System Error removing " + file +
                ".  Check console for environment build/execution errors")
            if print_exc: traceback.print_exc()

    ### Using new data API - 2019 and beyond
    if timing:
        print("Cleanup: " + str(time.time()))
    if useNewReport and not legacy:

        try:
            shutil.rmtree("execution")
        except:
            pass
        manageEnvs = getManageEnvs(FullManageProjectName)
        if timing:
            print("Using DataAPI for reporting")
            print("Get Info: " + str(time.time()))
        useNewAPI(FullManageProjectName, manageEnvs, level, envName, cbtDict)
        if timing:
            print("XML and Individual reports: " + str(time.time()))

    ### NOT Using new data API
    else:

        # parse out the manage project name
        tcmr2csv.manageProjectName = manageProjectName

        print("Generating Test Case Management Reports")

        cmd_prefix = os.environ.get('VECTORCAST_DIR') + os.sep

        # release locks and create all Test Case Management Reports
        callStr = cmd_prefix + "manage --project " + FullManageProjectName + " --force --release-locks"
        out_mgt = runManageWithWait(callStr)

        if level and envName:
            callStr = cmd_prefix + "manage --project " + FullManageProjectName + " --level " + level + " --environment " + envName + " --clicast-args report custom management"
        else:
            callStr = cmd_prefix + "manage --project " + FullManageProjectName + " --clicast-args report custom management"
        print(callStr)

        # capture the output of the manage call
        out_mgt = runManageWithWait(callStr)

        coverProjectInManageProject = False
        if "database missing or inaccessible" in out_mgt:
            coverProjectInManageProject = True
        elif re.search('Environment directory.*is missing', out_mgt):
            coverProjectInManageProject = True
        if coverProjectInManageProject:
            callStr = callStr.replace("report custom", "cover report")
            print(callStr)
            out_mgt2 = runManageWithWait(callStr)
            out_mgt = out_mgt + "\n" + out_mgt2

        if generate_individual_reports:
            print("Generating Execution Reports")
            if level and envName:
                callStr = cmd_prefix + "manage --project " + FullManageProjectName + " --level " + level + " --environment " + envName + " --clicast-args report custom actual"
            else:
                callStr = cmd_prefix + "manage --project " + FullManageProjectName + " --clicast-args report custom actual"

            print(callStr)

            out_exe = runManageWithWait(callStr)
            out = out_mgt + "\n" + out_exe
        else:
            out = out_mgt

        if verbose:
            print(out)

        # save the output of the manage command for debug purposes
        with open("build.log", "w") as fd:
            fd.write(out)

        copyList = []
        jobName = ""
        level = ""

        if timing:
            print("Using report scraping for metrics")
            print("Individual report generation: " + str(time.time()))
        if not os.path.exists("management"):
            os.mkdir("management")

        if not os.path.exists("execution"):
            os.mkdir("execution")

        #loop over each line of the manage command output
        env = None

        for line in out.split('\n'):
            # the TEST_SUITE line will give us information for building a jobName that will be
            # inserted into the CSV name so it will match with the Jenkins integration job names
            # Generated jobName ONLY used for reports in a single job
            if "COMMAND:" in line:
                info = line.split("-e ")
                env = info[1].split(" ")[0]
            if "TEST SUITE" in line:
                info = line.split(": ")
                level = info[1].split("/")
                if len(level) == 2:
                    # Level does not include source and platform
                    jobName = level[0] + "_" + level[1].rstrip()
                else:
                    # Level includes source and platform
                    jobName = level[2] + "_" + level[3].rstrip()
            if "DIRECTORY:" in line:
                directory = line.split(": ")[1].strip()

            # Get the HTML file name that was created
            if "HTML report was saved" in line:

                # strip out anything that isn't the html file name
                reportName = line.rstrip()[34:-2]

                if not os.path.isfile(reportName):
                    reportName = os.path.join(directory, env,
                                              os.path.basename(reportName))

                # setup to save the execution report
                if 'execution_results_report' in reportName:
                    print("Processing Execution Report: " + reportName)

                    if envName:
                        adjustedReportName = "execution" + os.sep + envName + "_" + jobName + ".html"
                    else:
                        adjustedReportName = "execution" + os.sep + env + "_" + jobName + ".html"

                # setup to save the management report
                if 'management_report' in reportName:

                    print("Processing Test Case Management Report: " +
                          reportName)

                    # Create the test_results_ and coverage_results_ csv files
                    testResultName, coverageResultsName = tcmr2csv.run(
                        reportName, level, version)

                    vcastcsv2jenkins.run(
                        test=testResultName,
                        coverage=coverageResultsName,
                        useExecRpt=generate_individual_reports,
                        version=version)

                    if envName:
                        adjustedReportName = "management" + os.sep + jobName + "_" + envName + ".html"
                    else:
                        adjustedReportName = "management" + os.sep + jobName + "_" + env + ".html"

                # Create a list for later to copy the files over
                copyList.append([reportName, adjustedReportName])
                # Reset env
                env = None

        if coverProjectInManageProject:
            generate_qa_results_xml.genQATestResults(FullManageProjectName,
                                                     saved_level,
                                                     saved_envName)

        failed_count = 0
        try:
            for file in glob.glob("xml_data/test_results_*.xml"):
                with open(file, "r") as fd:
                    lines = fd.readlines()

                for line in lines:
                    if "failures" in line:
                        failed_count += int(line.split("\"")[5])
                        break
        except:
            teePrint.teePrint(
                "   *INFO: Problem parsing test results file for unit testcase failure count: "
                + file)
            if print_exc: traceback.print_exc()

        with open("unit_test_fail_count.txt", "w") as fd:
            failed_str = str(failed_count)
            try:
                fd.write(unicode(failed_str))
            except:
                fd.write(failed_str)

        for file in copyList:

            if verbose:
                print("moving %s -> %s" % (file[0], file[1]))

            shutil.move(file[0], file[1])

    if timing:
        print("QA Results reports: " + str(time.time()))

    if timing:
        print("Complete: " + str(time.time()))
Example #19
def parse_html_files(mpName):

    if os.path.exists(mpName + "_rebuild.html"):
        os.remove(mpName + "_rebuild.html")

    report_file_list = []
    full_file_list = os.listdir(".")
    for file in full_file_list:
        if "_rebuild.html" in file:
            report_file_list.append(file)

    if len(report_file_list) == 0:
        print(
            "No incrementatal rebuild reports found in the workspace...skipping"
        )
        return

    with open(report_file_list[0], "r") as fd:
        try:
            main_soup = BeautifulSoup((fd), features="lxml")
        except:
            main_soup = BeautifulSoup(fd)

    preserved_count = 0
    executed_count = 0
    total_count = 0

    if main_soup.find(id="report-title"):
        main_manage_api_report = True
        # New Manage reports have div with id=report-title
        # Want second table (skip config data section)
        main_row_list = main_soup.find_all('table')[1].tr.find_next_siblings()
        main_count_list = main_row_list[-1].th.find_next_siblings()
    else:
        main_manage_api_report = False
        main_row_list = main_soup.table.table.tr.find_next_siblings()
        main_count_list = main_row_list[-1].td.find_next_siblings()

    preserved_count = preserved_count + int(main_count_list[1].get_text())
    executed_count = executed_count + int(main_count_list[2].get_text())
    total_count = total_count + int(main_count_list[3].get_text())
    if main_manage_api_report:
        build_success, build_total = [
            int(s.strip()) for s in
            main_count_list[0].get_text().strip().split('(')[0][:-1].split('/')
        ]
    else:
        build_success, build_total = [
            int(s.strip())
            for s in main_count_list[0].get_text().strip().split('(')[-1]
            [:-1].split('/')
        ]

    insert_idx = 2
    for file in report_file_list[1:]:
        with open(file, "r") as fd:
            try:
                soup = BeautifulSoup((fd), features="lxml")
            except:
                soup = BeautifulSoup(fd)

        if soup.find(id="report-title"):
            manage_api_report = True
            # New Manage reports have div with id=report-title
            # Want second table (skip config data section)
            row_list = soup.find_all('table')[1].tr.find_next_siblings()
            count_list = row_list[-1].th.find_next_siblings()
        else:
            manage_api_report = False
            row_list = soup.table.table.tr.find_next_siblings()
            count_list = row_list[-1].td.find_next_siblings()
        for item in row_list[:-1]:
            if manage_api_report:
                main_soup.find_all('table')[1].insert(insert_idx, item)
            else:
                main_soup.table.table.insert(insert_idx, item)
            insert_idx = insert_idx + 1
        preserved_count = preserved_count + int(count_list[1].get_text())
        executed_count = executed_count + int(count_list[2].get_text())
        total_count = total_count + int(count_list[3].get_text())
        if manage_api_report:
            build_totals = [
                int(s.strip()) for s in count_list[0].get_text().strip().split(
                    '(')[0][:-1].split('/')
            ]
        else:
            build_totals = [
                int(s.strip()) for s in count_list[0].get_text().strip().split(
                    '(')[-1][:-1].split('/')
            ]
        build_success = build_success + build_totals[0]
        build_total = build_total + build_totals[1]

    try:
        percentage = build_success * 100 // build_total
    except:
        percentage = 0
    if main_manage_api_report:
        main_row_list = main_soup.find_all('table')[1].tr.find_next_siblings()
        main_count_list = main_row_list[-1].th.find_next_siblings()
        main_count_list[0].string.replace_with(
            str(build_success) + " / " + str(build_total) + " (" +
            str(percentage) + "%)")
    else:
        main_row_list = main_soup.table.table.tr.find_next_siblings()
        main_count_list = main_row_list[-1].td.find_next_siblings()
        main_count_list[0].string.replace_with(
            str(percentage) + "% (" + str(build_success) + " / " +
            str(build_total) + ")")

    main_count_list[1].string.replace_with(str(preserved_count))
    main_count_list[2].string.replace_with(str(executed_count))
    main_count_list[3].string.replace_with(str(total_count))

    # remove the table of contents because the >v icon is messing stuff up and it's pointless in this report
    for div in main_soup.find_all("div", {'class': 'contents-block'}):
        div.decompose()

    #<div class="report-body no-toc" id="main-scroller">
    div = main_soup.find("div", {'class': 'report-body'})
    if div:
        div['class'] = "report-body no-toc"

    with open(mpName + "_rebuild.html", "w") as fd:
        fd.write(main_soup.prettify(formatter="html"))

    import fixup_reports
    main_soup = fixup_reports.fixup_2020_soup(main_soup)

    # write the fixed-up combined report to a temporary file
    with open("combined_incr_rebuild.tmp", "w") as fd:
        fd.write(main_soup.prettify(formatter="html"))

    # moving rebuild reports down into a sub directory
    if not os.path.exists("rebuild_reports"):
        os.mkdir("rebuild_reports")
    for file in report_file_list:
        if mpName + "_rebuild.html" in file:
            continue
        if os.path.exists(file):
            shutil.move(file, "rebuild_reports/" + file)

    # copy the CSS and PNG files for manage rebuild reports...if available
    import glob
    for file in glob.glob("*.css"):
        shutil.copy(file, "rebuild_reports/" + file)
    for file in glob.glob("*.png"):
        shutil.copy(file, "rebuild_reports/" + file)
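The two slicing expressions above handle the two summary-cell layouts; a worked example, with cell text matching the replacement strings written near the end of the function:

s_new = "3 / 4 (75%)"   # Data API style rebuild report
s_old = "75% (3 / 4)"   # legacy style rebuild report
print([int(s.strip()) for s in s_new.strip().split('(')[0][:-1].split('/')])   # [3, 4]
print([int(s.strip()) for s in s_old.strip().split('(')[-1][:-1].split('/')])  # [3, 4]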
Example #20
    parser.add_argument('--buildlog', help='Build Log for CBT Statistics')

    args = parser.parse_args()

    legacy = args.legacy

    if legacy and sys.version_info[0] >= 3:
        print(
            "Legacy mode testing not support with Python3 (VectorCAST 2021 and above)"
        )
        sys.exit(-1)

    try:
        tool_version = os.path.join(os.environ['VECTORCAST_DIR'], "DATA",
                                    "tool_version.txt")
        with open(tool_version, "r") as fd:
            ver = fd.read()

        if "19.sp1" in ver:
            # custom report patch for SP1 problem - should be fixed in future release
            old_init = CustomReport._post_init

            def new_init(self):
                old_init(self)
                self.context['report']['use_all_testcases'] = True

            CustomReport._post_init = new_init
    except:
        pass

    tcmr2csv.useLocalCsv = True
Example #21
def write_xml(file, xml_content, mode):
    with open(file, mode) as fd:
        fd.write(xmlToString(xml_content))
Example #22
def parse_text_files(mpName):
    header = """
--------------------------------------------------------------------------------
Manage Incremental Rebuild Report
--------------------------------------------------------------------------------



--------------------------------------------------------------------------------
Environments Affected
--------------------------------------------------------------------------------
  -------------------------------------------------------------------------------
  Environment           Rebuild Status              Unaffecte Affected  Total Tes
                                                    d Tests   Tests     ts
  -------------------------------------------------------------------------------
"""

    report_file_list = []
    full_file_list = os.listdir(".")
    for file in full_file_list:
        if "_rebuild.txt" in file:
            print(file)
            report_file_list.append(file)

    rebuild_count = 0
    rebuild_total = 0
    preserved_count = 0
    executed_count = 0
    total_count = 0

    outStr = ""

    for file in report_file_list:
        print("processing file: " + file)
        sepCount = 0
        with open(file, "r") as fd:
            lines = fd.readlines()
        for line in lines:
            if re.search("^  Totals", line):
                totals = line.replace("(", "").replace(")", "").split()
                rebuild_count += int(totals[2])
                rebuild_total += int(totals[4])
                preserved_count += int(totals[5])
                executed_count += int(totals[6])
                total_count += int(totals[7])
            if "--------" in line:
                sepCount += 1
            elif sepCount == 6:
                outStr += line

    try:
        percentage = rebuild_count * 100 // rebuild_total
    except:
        percentage = 0

    totalStr = "\n  -------------------------------------------------------------------------------"
    template = "\nTotals                  %3d%% (%4d / %4d)          %9d %9d %9d"
    totalStr += template % (percentage, rebuild_count, rebuild_total,
                            preserved_count, executed_count, total_count)

    with open(mpName + "_rebuild.txt", "w") as fd:
        fd.write(header + outStr + totalStr)

    # moving rebuild reports down in to a sub directory
    if not os.path.exists("rebuild_reports"):
        os.mkdir("rebuild_reports")
    for file in report_file_list:
        if os.path.exists(file):
            shutil.move(file, "rebuild_reports/" + file)
                        tc = line.split("Running: ")[-1]     
                        tc_name = fileName + "/" + func + "/" + tc
                        currTestNdx = simpleTestIndex                        
                        self.environmentDict[hashCode][currTestNdx][tc_name] = [start_dto, None]
                elif "There are no slots in compound test" in line:
                    ##     There are no slots in compound test <<COMPOUND>>.FailNo_Slots.
                    tc_name = line.split(" ")[-1][:-1]
                    currTestNdx = compoundTestIndex                        
                    self.environmentDict[hashCode][currTestNdx][tc_name] = [start_dto, None]

                elif "All slots in compound test" in line:
                    ##     There are no slots in compound test <<COMPOUND>>.FailNo_Slots.
                    tc_name = line.split(" ")[5]
                    currTestNdx = compoundTestIndex                        
                    self.environmentDict[hashCode][currTestNdx][tc_name] = [start_dto, None]

        if self.verbose:
            pprint(self.environmentDict, width=132)
            
        return self.environmentDict           

if __name__ == '__main__':
    
    with open(sys.argv[1],"r") as fd:
        buildLogData = fd.readlines()
        
    parser = ParseConsoleForCBT(True)
    parser.parse(buildLogData)
    #pprint(parser.parse(buildLogData), width=132)
    
Example #24
 def end_cov_file(self):
     self.fh_data += ('</report>')
     with open(self.cover_report_name, "w") as fd:
         fd.write(self.fh_data)
Example #25
 def end_test_results_file(self):
     self.fh_data += ("   </testsuite>\n")
     self.fh_data += ("</testsuites>\n")
     with open(self.unit_report_name, "w") as fd:
         fd.write(self.fh_data)