Example #1
    def test_check_success_rate_failed(self):
        with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
                as mock_criteria:
            mock_criteria.return_value = self.criteria
            resp = functest_utils.check_success_rate(self.case_name,
                                                     3.0)
            self.assertEqual(resp, 'FAIL')
Example #2
    def test_check_success_rate_default(self):
        with mock.patch('functest.utils.functest_utils.get_criteria_by_test') \
                as mock_criteria:
            mock_criteria.return_value = self.criteria
            resp = functest_utils.check_success_rate(self.case_name,
                                                     self.success_rate)
            self.assertEqual(resp, 'PASS')
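
Both tests above mock get_criteria_by_test and only assert on the returned verdict, so the contract being exercised is: check_success_rate(case_name, success_rate) yields 'PASS' when the measured rate meets the configured criteria and 'FAIL' otherwise. A minimal sketch of such a helper, assuming the criteria string simply embeds a minimum percentage (the real functest implementation and its criteria format may differ):

import re


def get_criteria_by_test(case_name):
    # Stand-in for the real functest lookup, hard-coded so the sketch runs.
    return "success_rate == 90%"


def check_success_rate(case_name, success_rate):
    # Hypothetical sketch only -- it mirrors the PASS/FAIL contract the unit
    # tests above rely on, not necessarily the real functest code.
    criteria = get_criteria_by_test(case_name)
    min_rate = float(re.search(r"(\d+(\.\d+)?)", criteria).group(1))
    return 'PASS' if float(success_rate) >= min_rate else 'FAIL'


print(check_success_rate('dummy_case', 3.0))   # FAIL, as in Example #1
print(check_success_rate('dummy_case', 90.0))  # PASS, as in Example #2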
Example #3
    def parse_verifier_result(self):
        if self.VERIFICATION_ID is None:
            raise Exception('Verification UUID not found')

        cmd_line = "rally verify show --uuid {}".format(self.VERIFICATION_ID)
        logger.info("Showing result for a verification: '%s'." % cmd_line)
        p = subprocess.Popen(cmd_line,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        for line in p.stdout:
            new_line = line.replace(' ', '').split('|')
            if 'Tests' in new_line:
                break

            logger.info(line)
            if 'Testscount' in new_line:
                num_tests = new_line[2]
            elif 'Success' in new_line:
                num_success = new_line[2]
            elif 'Skipped' in new_line:
                num_skipped = new_line[2]
            elif 'Failures' in new_line:
                num_failures = new_line[2]

        try:
            num_executed = int(num_tests) - int(num_skipped)
            success_rate = 100 * int(num_success) / int(num_executed)
            with open(
                    os.path.join(conf_utils.TEMPEST_RESULTS_DIR,
                                 "tempest.log"), 'r') as logfile:
                output = logfile.read()

            error_logs = ""
            for match in re.findall('(.*?)[. ]*fail ', output):
                error_logs += match
            skipped_testcase = ""
            for match in re.findall('(.*?)[. ]*skip:', output):
                skipped_testcase += match

            self.details = {
                "tests": int(num_tests),
                "failures": int(num_failures),
                "errors": error_logs,
                "skipped": skipped_testcase
            }
        except Exception:
            success_rate = 0

        self.criteria = ft_utils.check_success_rate(self.case_name,
                                                    success_rate)
        logger.info("Tempest %s success_rate is %s%%, is marked as %s" %
                    (self.case_name, success_rate, self.criteria))
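
The counter extraction above depends on how a row of the 'rally verify show' table splits once spaces are removed. A small standalone illustration with a made-up row (the exact table layout is assumed, not taken from the example):

# Hypothetical row shaped like the verification summary table.
row = "| Tests count         | 210        |\n"
cells = row.replace(' ', '').split('|')
# cells == ['', 'Testscount', '210', '\n']: the label lands at index 1 and
# the value at index 2, which is why the loop above reads new_line[2].
print(cells)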
Example #4
    def parse_refstack_result(self):
        try:
            with open(
                    os.path.join(conf_utils.REFSTACK_RESULTS_DIR,
                                 "refstack.log"), 'r') as logfile:
                output = logfile.read()

            for match in re.findall("Ran: (\d+) tests in (\d+\.\d{4}) sec.",
                                    output):
                num_tests = match[0]
                logger.info("Ran: %s tests in %s sec." % (num_tests, match[1]))
            for match in re.findall("(- Passed: )(\d+)", output):
                num_success = match[1]
                logger.info("".join(match))
            for match in re.findall("(- Skipped: )(\d+)", output):
                num_skipped = match[1]
                logger.info("".join(match))
            for match in re.findall("(- Failed: )(\d+)", output):
                num_failures = match[1]
                logger.info("".join(match))
            success_testcases = ""
            for match in re.findall(r"\{0\}(.*?)[. ]*ok", output):
                success_testcases += match + ", "
            failed_testcases = ""
            for match in re.findall(r"\{0\}(.*?)[. ]*FAILED", output):
                failed_testcases += match + ", "
            skipped_testcases = ""
            for match in re.findall(r"\{0\}(.*?)[. ]*SKIPPED:", output):
                skipped_testcases += match + ", "

            num_executed = int(num_tests) - int(num_skipped)
            success_rate = 100 * int(num_success) / int(num_executed)

            self.details = {
                "tests": int(num_tests),
                "failures": int(num_failures),
                "success": success_testcases,
                "errors": failed_testcases,
                "skipped": skipped_testcases
            }
        except Exception:
            success_rate = 0

        self.criteria = ft_utils.check_success_rate(self.case_name,
                                                    success_rate)
        logger.info("Testcase %s success_rate is %s%%, is marked as %s" %
                    (self.case_name, success_rate, self.criteria))
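
Each counter above is captured with a two-group regular expression: the first group is the label, the second the number. A standalone illustration with a made-up summary line:

import re

sample = "- Passed: 180"  # hypothetical refstack summary line
matches = re.findall(r"(- Passed: )(\d+)", sample)
# findall returns a list of (label, value) tuples, so matches[0][1] is the
# count and "".join(matches[0]) rebuilds the original text for logging.
print(matches[0][1], "".join(matches[0]))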
Example #5
def main():

    GlobalVariables.nova_client = os_utils.get_nova_client()
    GlobalVariables.neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # check that the requested test name is valid
    if args.test_name not in tests:
        logger.error('Argument not valid: %s' % args.test_name)
        exit(-1)

    GlobalVariables.SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client, private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(cinder_client,
                                                  CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." %
                         CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(
        GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH, GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
    GlobalVariables.network_dict = \
        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
                                            RALLY_PRIVATE_SUBNET_NAME,
                                            RALLY_ROUTER_NAME,
                                            RALLY_PRIVATE_SUBNET_CIDR)
    if not GlobalVariables.network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in GlobalVariables.SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" + "| " + name + " | " + duration + " | " + nb_tests +
                   " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({
            'module': name,
            'details': {
                'duration': s['overall_duration'],
                'nb tests': s['nb_tests'],
                'success': s['success']
            }
        })

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    if GlobalVariables.SUMMARY:
        success_rate = total_success / len(GlobalVariables.SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({
        'summary': {
            'duration': total_duration,
            'nb tests': total_nb_tests,
            'nb success': success_rate
        }
    })

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest", case_name, start_time,
                                    stop_time, status, payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
                                            image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..." % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)
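
Each summary row in the report above combines fixed-width left-aligned padding with a seconds-to-MM:SS conversion. A standalone illustration with made-up values:

import time

# Made-up values, just to show the padding and MM:SS conversion used above.
duration = 754.3  # seconds
name = "{0:<17}".format("authenticate")
mmss = "{0:<10}".format(time.strftime("%M:%S", time.gmtime(duration)))
print("| " + name + " | " + mmss + " |")
# prints: | authenticate      | 12:34      |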
Example #6
    def _generate_report(self):
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for s in self.summary:
            name = "{0:<17}".format(s['test_name'])
            duration = float(s['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(s['nb_tests'])
            total_nb_tests += int(s['nb_tests'])
            success = "{0:<10}".format(str(s['success']) + '%')
            total_success += float(s['success'])
            report += ("" + "| " + name + " | " + duration + " | " + nb_tests +
                       " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({
                'module': name,
                'details': {
                    'duration': s['overall_duration'],
                    'nb tests': s['nb_tests'],
                    'success': s['success']
                }
            })

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        if self.summary:
            success_rate = total_success / len(self.summary)
        else:
            success_rate = 100
        success_rate = "{:0.2f}".format(success_rate)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        logger.info("\n" + report)
        payload.append({
            'summary': {
                'duration': total_duration,
                'nb tests': total_nb_tests,
                'nb success': success_rate
            }
        })

        self.criteria = ft_utils.check_success_rate(self.case_name,
                                                    success_rate)
        self.details = payload

        logger.info("Rally '%s' success_rate is %s%%, is marked as %s" %
                    (self.case_name, success_rate, self.criteria))
Example #7
def run_tempest(option):
    """The "main" function of the script, which launches Rally to run Tempest.

    :param option: tempest option (smoke, ..)
    :return: 0 if the success criteria are met, -1 otherwise
    """
    logger.info("Starting Tempest test suite: '%s'." % option)
    start_time = time.time()
    stop_time = start_time
    cmd_line = "rally verify start " + option + " --system-wide"

    header = ("Tempest environment:\n"
              "  Installer: %s\n  Scenario: %s\n  Node: %s\n  Date: %s\n" %
              (ft_constants.CI_INSTALLER_TYPE, ft_constants.CI_SCENARIO,
               ft_constants.CI_NODE, time.strftime("%a %b %d %H:%M:%S %Z %Y")))

    f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+')
    f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+')
    f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+')
    f_env.write(header)

    # subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr)
    p = subprocess.Popen(cmd_line,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=f_stderr,
                         bufsize=1)

    with p.stdout:
        for line in iter(p.stdout.readline, b''):
            if re.search("\} tempest\.", line):
                logger.info(line.replace('\n', ''))
            f_stdout.write(line)
    p.wait()

    f_stdout.close()
    f_stderr.close()
    f_env.close()

    cmd_line = "rally verify show"
    output = ""
    p = subprocess.Popen(cmd_line,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    for line in p.stdout:
        if re.search("Tests\:", line):
            break
        output += line
    logger.info(output)

    cmd_line = "rally verify list"
    cmd = os.popen(cmd_line)
    output = (((cmd.read()).splitlines()[-2]).replace(" ", "")).split("|")
    # Format:
    # | UUID | Deployment UUID | smoke | tests | failures | Created at |
    # Duration | Status  |
    num_tests = output[4]
    num_failures = output[5]
    time_start = output[6]
    duration = output[7]
    # Compute duration (let's assume it does not take more than 60 min)
    dur_min = int(duration.split(':')[1])
    dur_sec_float = float(duration.split(':')[2])
    dur_sec_int = int(round(dur_sec_float, 0))
    dur_sec_int = dur_sec_int + 60 * dur_min
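    # Worked example with an assumed duration string such as '0:05:24.931456':
    # dur_min = 5, dur_sec_float = 24.931456, so dur_sec_int = 25 + 5 * 60 = 325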
    stop_time = time.time()

    try:
        diff = (int(num_tests) - int(num_failures))
        success_rate = 100 * diff / int(num_tests)
    except Exception:
        success_rate = 0

    if 'smoke' in args.mode:
        case_name = 'tempest_smoke_serial'
    elif 'feature' in args.mode:
        case_name = args.mode.replace("feature_", "")
    else:
        case_name = 'tempest_full_parallel'

    status = ft_utils.check_success_rate(case_name, success_rate)
    logger.info("Tempest %s success_rate is %s%%, is marked as %s" %
                (case_name, success_rate, status))

    # Push results in payload of testcase
    if args.report:
        # add the test in error in the details sections
        # should be possible to do it during the test
        logger.debug("Pushing tempest results into DB...")
        with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile:
            output = myfile.read()
        error_logs = ""

        for match in re.findall('(.*?)[. ]*FAILED', output):
            error_logs += match

        # Generate json results for DB
        json_results = {
            "timestart": time_start,
            "duration": dur_sec_int,
            "tests": int(num_tests),
            "failures": int(num_failures),
            "errors": error_logs
        }
        logger.info("Results: " + str(json_results))
        # split Tempest smoke and full

        try:
            ft_utils.push_results_to_db("functest", case_name, start_time,
                                        stop_time, status, json_results)
        except Exception:
            logger.error("Error pushing results into Database '%s'" %
                         sys.exc_info()[0])

    if status == "PASS":
        return 0
    else:
        return -1