Example #1
def pytest_runtest_teardown(item):
    item.end_time = datetime.utcnow()
    ended_message = "\n%s %s %s %s %s\n" % (str(
        item.end_time), '#' * 30, "ENDED : ", item.name, '#' * 30)
    print(ended_message)
    LOGGER.info(ended_message)

    TC = re.findall(r'(tc\d+)', item.name)
    item.TC = TC
    # status, message, duration and error are expected to be attached to the item
    # by an earlier hook; fall back to safe defaults if they are missing
    status = getattr(item, "status", "Skip")
    if status == "Skip":
        message = getattr(item, "message", "Skipped")
        duration = getattr(item, "duration", 0)
        error = getattr(item, "error", "framework related error")
        for _tc in item.TC:
            pytest.tc_data['testcases'][item.name] = {
                "name": item.name,
                "message": message,
                "status": status,
                "duration": duration,
                "end_time": str(item.end_time),
                "id": _tc,
                "error": error,
                "file": os.path.join(utils.get_base_dir_path(),
                                     item.location[0])
            }
            # start_time is only set if the test body actually started; add it to
            # the existing entry instead of replacing the whole record
            try:
                pytest.tc_data['testcases'][item.name]["start_time"] = str(
                    item.start_time)
            except AttributeError:
                pass
    else:
        message = getattr(item, "message", "Failed")
        status = getattr(item, "status", "Fail")
        duration = getattr(item, "duration", 0)
        error = getattr(item, "error", "framework related error")
        for _tc in item.TC:
            pytest.tc_data['testcases'][item.name] = {
                "name": item.name,
                "message": message,
                "status": status,
                "duration": duration,
                "start_time": str(item.start_time),
                "end_time": str(item.end_time),
                "id": _tc,
                "error": error,
                "file": os.path.join(utils.get_base_dir_path(),
                                     item.location[0])
            }
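The teardown hook in Example #1 reads status, message, duration, start_time and error off the item, so something must have attached them earlier in the test protocol. A minimal sketch of how that could be done with a pytest_runtest_makereport hookwrapper; the attribute names and status strings are taken from Example #1, the mapping to report fields is an assumption:

import pytest
from datetime import datetime

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    report = outcome.get_result()
    if report.when == "setup":
        # record when the test actually started
        item.start_time = datetime.utcnow()
    elif report.when == "call":
        item.duration = report.duration
        item.status = "Pass" if report.passed else "Fail"
        item.message = report.longreprtext if report.failed else "Passed"
        if report.failed:
            item.error = report.longreprtext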
Example #2
def pytest_xdist_setupnodes(config, specs):
    """ called before any remote node is set up. """
    logger = logging.getLogger(TestLog.TEST_LOGGER)

    stream_list_reverse = config.streamInfo[::-1]
    log_dir = os.path.join(utils.get_base_dir_path(), TestDirectory.OUTPUT_DIR)
    for i, stream in enumerate(stream_list_reverse):
        specs[i].id = stream
        log_file = os.path.join(log_dir, stream + '.log')
        msg = "All log statements related to stream: %s will be updated to %s" % (
            stream, log_file)
        logging.info(msg)
        # write in master log file about stream logging details
        logger.info(msg)
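Example #2 assumes that config.streamInfo already holds one stream name per xdist node when the hook runs. A minimal sketch of how such a list could be attached to the config; the --streams option name is an assumption, only the streamInfo attribute comes from the example:

def pytest_addoption(parser):
    parser.addoption("--streams", default="",
                     help="comma-separated stream names, one per xdist worker")

def pytest_configure(config):
    # one entry per remote node; the list length should match the xdist -n value
    config.streamInfo = [s for s in config.getoption("--streams").split(",") if s]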
Example #3
def pytest_sessionstart(session):
    pytest.suite_start_time = datetime.utcnow()

    pytest.tc_data = OrderedDict()
    pytest.tc_data['testcases'] = OrderedDict()
    pytest.tc_data['summary'] = OrderedDict()
    pytest.tc_data['summary']['counts'] = OrderedDict()
    pytest.tc_data['summary']['percentage'] = OrderedDict()

    test_setup.init_logger(
        os.environ.get("PYTEST_XDIST_WORKER", TestLog.TEST_LOGGER))
    test_setup.init_logger(TestLog.DEBUG_LOGGER, TestLog.LOG_FORMAT_DEBUG)

    rally_result_file = os.path.join(utils.get_base_dir_path(),
                                     TestReport.RALLY_RESULT_FILE)
    if os.path.isfile(rally_result_file):
        os.unlink(rally_result_file)
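pytest_sessionstart only creates the empty tc_data structure; the counts and percentage summaries still have to be filled in at the end of the session. A sketch of what that could look like, assuming the status values used in Example #1 ("Pass", "Fail", "Skip"):

from collections import Counter

import pytest

def pytest_sessionfinish(session, exitstatus):
    testcases = pytest.tc_data['testcases']
    counts = Counter(tc.get("status", "Skip") for tc in testcases.values())
    total = sum(counts.values()) or 1
    pytest.tc_data['summary']['counts'] = dict(counts)
    pytest.tc_data['summary']['percentage'] = {
        status: round(100.0 * count / total, 2)
        for status, count in counts.items()
    }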
Example #4
def execute_pre_check():
    # validate that the oc CLI is installed
    result = utils.execute_local('oc')
    if result[0]:
        pytest.fail(
            "OpenShift CLI: oc command is not installed in this system")

    # validate kubeconfig for VMR
    base_path = os.path.join(utils.get_base_dir_path(),
                             TestDirectory.CONFIG_DIR)
    kubeconfig_path = os.path.join(base_path, VMR_KUBECONFIG)

    if not os.path.isfile(kubeconfig_path):
        pytest.fail("VMR KUBECONFIG FILE NOT FOUND: %s" % kubeconfig_path)

    os.environ["KUBECONFIG"] = kubeconfig_path
    result = utils.execute_local('oc project vmr')
    if result[0]:
        pytest.fail("Invalid VMR KUBECONFIG: %s" % kubeconfig_path)
Example #5
def test_rtc10602_tc_launch_delete(stream):
    """
    TC10602: test module for the longevity delete recordings test suite
    # delete recording as per the configuration for number of deletion and duration for deletion
    """

    longevity_config = utils.get_spec_config()
    # no need for this numofaccts usage with state 3 dataplane
    # numofaccts = longevity_config['systemvalues']['numberofaccounts']
    deletespermin = longevity_config['recording']['recordingspermin']
    test_dur_in_min = longevity_config['recording']['testdurationinmins']

    # compute the file path and initialise rec_dict up front so the finally
    # block below can reference them even if an early assertion fails
    rec_dict = None
    json_file = "recording_list"
    temp_dir = os.path.join(utils.get_base_dir_path(), TestDirectory.TEMP_DIR)
    rec_list_json_file = os.path.join(temp_dir, json_file + '.json')

    try:
        assert os.path.exists(rec_list_json_file), ValidationError.FILE_UNAVAILABLE.format(rec_list_json_file)

        with open(rec_list_json_file) as rec_json_file:
            rec_dict = json.load(rec_json_file)
            print(rec_dict)

        assert rec_dict, ValidationError.FILE_UNAVAILABLE.format(rec_dict)

        # convert yaml parameters to minutes and seconds
        if isinstance(test_dur_in_min, int):
            test_duration_in_seconds = test_dur_in_min * constants.SECONDS_IN_MINUTE

            if isinstance(deletespermin, int):
                # Two scenarios:
                # 1: fewer than 60 deletions per minute -> delete one at a time and sleep longer than a second.
                # 2: 60 or more deletions per minute -> delete a batch every second.

                # per-second deletion rate derived from the configured per-minute rate
                deletions_per_sec_rounded = float(deletespermin) / float(constants.SECONDS_IN_MINUTE)
                sleep_duration = 1
                if 0.0 < deletions_per_sec_rounded < 1.0:
                    deletions_per_sec_rounded = 1
                    sleep_duration = constants.SECONDS_IN_MINUTE / float(deletespermin)
                    total_test_loop_counts = int(test_duration_in_seconds / sleep_duration)
                elif 1 <= int(round(deletions_per_sec_rounded)):
                    deletions_per_sec_rounded = int(round(deletions_per_sec_rounded))
                    sleep_duration = 1
                    total_test_loop_counts = test_duration_in_seconds

                print()
                print("[INFO: ] total_test_loop_counts ", total_test_loop_counts)
                print("[INFO: ] deletions_per_sec_rounded ", deletions_per_sec_rounded)
                print("[INFO: ] sleep duration ", sleep_duration)
                print()

                rec_list = rec_dict.get('rec_ids')
                for i in range(total_test_loop_counts):
                    rec_list, rec_dict = delete_recordings(deletions_per_sec_rounded, rec_list, rec_dict)
                    time.sleep(sleep_duration)
            else:
                assert False, ValidationError.TYPE_MISMATCH.format('deletespermin is not a number: {0}'.format(deletespermin))

        else:
            assert False, ValidationError.TYPE_MISMATCH.format('testduration is not a number: {0}'.format(test_dur_in_min))

    finally:
        # persist the remaining recordings so later runs see the updated list
        if rec_list_json_file and rec_dict:
            with open(rec_list_json_file, 'w') as rec_json_file:
                json.dump(rec_dict, rec_json_file)
                print(rec_dict)
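The rate handling above works out as follows (assuming constants.SECONDS_IN_MINUTE is 60): with recordingspermin = 30 and testdurationinmins = 2, the per-second rate is 30 / 60 = 0.5, so the test deletes one recording per iteration, sleeps 60 / 30 = 2 seconds between iterations, and runs 120 / 2 = 60 iterations; with recordingspermin = 120 over the same two minutes, the rate is 120 / 60 = 2, so it deletes two recordings per iteration, sleeps 1 second, and runs 120 iterations. Either way roughly recordingspermin deletions happen per minute for the configured duration.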
Example #6
def setup(request):
    """
    Set up the environment for the test run. This runs automatically before every test run.
    :param request: the incoming context
    """
    def teardown():
        """
        Clean up the environment after the test run. This is executed once all tests have finished.
        """
        test_setup.prepare_test_summary(request)

    request.addfinalizer(teardown)

    base_path = os.path.join(utils.get_base_dir_path(),
                             TestDirectory.CONFIG_DIR)
    kubeconfig_path = os.path.join(base_path, VMR_KUBECONFIG)
    config.load_kube_config(os.path.join(os.environ["HOME"], kubeconfig_path))

    # clean up logs and update the vle_default.conf interface value based on the system interface name
    utils.vle_log_cleanup()
    utils.vle_default_config_interface_change()

    times_are_synchronized, error = test_setup.are_times_synchronized()
    if not times_are_synchronized:
        pytest.fail(error)

    LOGGER.info(
        "All components including the test machine are time synchronized as required"
    )

    playback_host_resolved, error = test_setup.is_playback_host_resolved()
    if not playback_host_resolved:
        pytest.fail(error)

    streams_info, drift_avail, stream_drift_info, drift_applicable = test_setup.find_live_point_drift()
    generic_direct_config = utils.get_direct_config()
    generic_config = utils.get_spec_config()

    if not drift_applicable:
        warnings.warn(
            UserWarning(stream_drift_info +
                        'Not applicable drift streams removed.'))

    # Fail if time drift present in stream, but drift handle is False
    if not (generic_direct_config.get(Component.DRIFT_HANDLE,
                                      False)) and drift_avail:
        message = stream_drift_info + " stream has drift but drift handle is set to False, hence failing the test execution"
        LOGGER.error("Stream -> drift: %s" % stream_drift_info)
        LOGGER.error(message)
        pytest.exit(message)

    # Expose the stream details as environment variable
    if generic_direct_config.get(Component.DRIFT_HANDLE, False):
        status = utils.set_pytest_stream_info(streams_info)

    if utils.is_v2pc():
        v2pc_auth_token = utils.get_v2pc_api_auth_token()
        if not v2pc_auth_token:
            pytest.fail("Unable to fetch V2PC authorization token")
        constants.V2pc.auth_token = v2pc_auth_token
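The docstring in Example #6 says the setup runs automatically before every test run, which implies it is registered as an autouse fixture in conftest.py. A sketch of that registration; the session scope is an assumption:

import pytest

@pytest.fixture(scope="session", autouse=True)
def setup(request):
    # body as in Example #6
    ...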