def test_rtc9812_tc_des_003_dcm_multi_cast_ip_block_in_progress_recording():
    """
    Block Incoming MCE network interface after recording starts and unblock it before the recording ends,
    Validate the recording state against INCOMPLETE and number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    start_duration = 30
    block_trigger_time = 30
    block_duration = 60
    try:
        rev_cmds = {}
        des_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)

            # when the data-in interface is shared with the management interface, blocking it would
            # presumably also cut the SSH session used to revert it, so only a short auto-revert is
            # scheduled and the impairment is left unverified (its rev_cmds entry stays None)
            if mce_node[Interface.DATA_IN] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip,
                                                                      destructive.MCE_JOB_IDS, constants.MINUTES * 10)
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip, destructive.MCE_JOB_IDS,
                                                   constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

            des_cmds[mce_ip] = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(
                DestructiveTesting.IFB_INTERFACE, DestructiveTesting.PACKET_LOSS_BLOCK)
            des_cmds[mce_ip] = destructive_utils.get_incoming_tc_cmd(mce_data_in, des_cmds[mce_ip])

        # create a recording whose in-progress capture will be blocked and later unblocked

        start_time = utils.get_formatted_time(constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 210, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, error = validate_recordings.validate_notification(web_service_obj, constants.RecordingStatus.STARTED,
                                                                    constants.SECONDS * start_duration)

        assert is_valid, error

        time.sleep(constants.SECONDS * block_trigger_time)

        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_node[Component.IP])

            if rev_cmds[mce_ip]:
                # expected outcome after the destructive commands are run
                expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS_BLOCK,
                                   DestructiveTesting.SRC: DestructiveTesting.NETWORK}
                is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, DestructiveTesting.IFB_INTERFACE,
                                                                         des_cmds[mce_ip], expected_result)
                assert is_des_effective, error
            else:
                LOGGER.info("Executing the command=%s to cause destruction in the component", des_cmds[mce_ip])
                ssh_client.exec_command(des_cmds[mce_ip])

        time.sleep(constants.SECONDS * block_duration)
        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_node[Component.IP])
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                                                                      DestructiveTesting.IFB_INTERFACE, destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(ssh_client, DestructiveTesting.IFB_INTERFACE)
            assert rev_effective, error

        recording_id = response[RecordingAttribute.RECORDING_ID]
        is_valid, error = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.INCOMPLETE], web_service_obj=web_service_obj, end_time=end_time)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_segments_threshold_storage(
            recording_id, constants.SECONDS * block_duration)
        assert is_valid, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9804_tc_des_011_mce_ni_packet_latency():
    """
    Introduce the latency on each packet on the outgoing MCE Interface, trigger a recording and verify if the
    recording is successful
    """
    ssh_client = None
    response = None
    web_service_obj = None
    start_duration = 30
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_out = mce_node[Interface.DATA_OUT]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS)

            rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(
                ssh_client, mce_data_out, mce_ip, destructive.MCE_JOB_IDS,
                constants.MINUTES * 10)
            des_cmd = DestructiveTesting.PACKET_LATENCY_OUTGOING_INTERFACE.format(
                mce_data_out, DestructiveTesting.PACKET_LATENCY)
            des_cmd = destructive_utils.get_outgoing_tc_cmd(
                mce_data_out, des_cmd)

            # expected outcome after the destructive commands are run
            expected_result = {
                DestructiveTesting.DELAY: DestructiveTesting.PACKET_LATENCY,
                DestructiveTesting.DST: DestructiveTesting.NETWORK
            }
            is_des_effective, error = destructive_utils.exec_des_cmd(
                ssh_client, mce_data_out, des_cmd, expected_result)
            assert is_des_effective, error

        start_time = utils.get_formatted_time(
            constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS,
            STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        recording_id = response[RecordingAttribute.RECORDING_ID]
        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error

        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            mce_data_out = mce_node[Interface.DATA_OUT]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(
                    COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                    mce_data_out, destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(
                    ssh_client, mce_data_out)
            assert rev_effective, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
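
# For reference, a hedged sketch of the egress shaping the latency test above relies on: a netem
# qdisc attached directly to the outgoing data interface delays every transmitted packet. The
# interface name, delay value, and helper below are illustrative assumptions only.
def _example_egress_delay_cmd(data_out="eth2", delay="500ms"):
    """Return a shell command that would delay all outgoing packets on data_out."""
    # netem on the root qdisc of the egress interface affects all transmitted traffic
    return "tc qdisc add dev {0} root netem delay {1}".format(data_out, delay)
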
def test_rtc9801_tc_des_008_mce_ni_block_pending_recording():
    """
    Block traffic on the outgoing MCE interface, trigger a recording(4 minutes) and unblock the interface after 2 minutes
    Check if the recording is INCOMPLETE. Verify the playback of recording
    """
    ssh_client = None
    response = None
    start_duration = 30
    end_duration = 270
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_out = mce_node[Interface.DATA_OUT]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS)

            des_cmd = DestructiveTesting.PACKET_LOSS_OUTGOING_INTERFACE.format(
                mce_data_out, DestructiveTesting.PACKET_LOSS_BLOCK)
            des_cmd = destructive_utils.get_outgoing_tc_cmd(
                mce_data_out, des_cmd)

            if mce_node[Interface.DATA_OUT] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(
                    ssh_client, mce_data_out, mce_ip, destructive.MCE_JOB_IDS,
                    constants.MINUTES * 10)

                expected_result = {
                    DestructiveTesting.LOSS:
                    DestructiveTesting.PACKET_LOSS_BLOCK,
                    DestructiveTesting.DST: DestructiveTesting.NETWORK
                }
                is_des_effective, error = destructive_utils.exec_des_cmd(
                    ssh_client, mce_data_out, des_cmd, expected_result)
                assert is_des_effective, error
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_out,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS,
                                                   constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

                LOGGER.info(
                    "Executing the command=%s to cause destruction in the component",
                    des_cmd)
                ssh_client.exec_command(des_cmd)

        start_time = utils.get_formatted_time(
            constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS,
            STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * end_duration,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        time.sleep(end_duration + constants.TIME_DELTA)

        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            mce_data_out = mce_node[Interface.DATA_OUT]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(
                    COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                    mce_data_out, destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(
                    ssh_client, mce_data_out)
            assert rev_effective, error

        is_valid, rec_error = validate_recordings.validate_recording_end_state(
            response[RecordingAttribute.RECORDING_ID],
            [RecordingStatus.INCOMPLETE],
            web_service_obj=response[RecordingAttribute.WEB_SERVICE_OBJECT])

        recording_response = rio.find_recording(
            response[RecordingAttribute.RECORDING_ID]).json()
        LOGGER.debug("Recording response=%s", recording_response)

        assert is_valid, rec_error

        # validate playback to check if available segments are recorded
        is_valid, error = validate_recordings.validate_playback(
            response[RecordingAttribute.RECORDING_ID])

        assert is_valid, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if response:
            response[RecordingAttribute.WEB_SERVICE_OBJECT].stop_server()
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
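
# The schedule_rev_cmd / exec_rev_cmd pairing used throughout these tests gives every impairment a
# safety net: a revert is queued on the node *before* the destructive command runs, so the interface
# is restored even if the test aborts mid-way. One plausible shape for that revert and for queueing
# it with the at(1) scheduler is sketched below; the real helpers may well do this differently.
def _example_revert_cmds(interface="eth2", delay_minutes=10):
    """Return the revert command and an at(1) command that schedules it as a safety net."""
    revert = "tc qdisc del dev {0} root".format(interface)  # remove whatever shaping was added
    scheduled = "echo '{0}' | at now + {1} minutes".format(revert, delay_minutes)
    return revert, scheduled
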
def test_rtc9805_tc_des_012_cos_ni_block_pending_recording():
    """
    Block Incoming COS interface before the recording starts and unblock the cos interface before recording complete,
    Validate the recording state against INCOMPLETE and number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    start_duration = 30
    block_duration = 90
    component_dict = {}
    cos_nodes = v2pc_helper.get_cos_node_data()

    ifbcount = 0
    try:
        for node in cos_nodes:
            comp_ip = node[Component.IP]
            # fresh revert-command map for this node's interfaces
            rev_interfaces_dict = {}

            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=comp_ip,
                                              password=COS_PASSWORD)

            # deleting the jobs previously scheduled by other test cases, in order not to tamper with the current test case
            LOGGER.info(destructive.COS_JOB_IDS)
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   comp_ip,
                                                   destructive.COS_JOB_IDS)

            # the node's interface list contains only data interfaces
            for interface in node[Interface.INTERFACES]:
                cos_data_in = interface

                ifb_interface = DestructiveTesting.IFB_INTERFACE + str(
                    ifbcount)

                rev_cmd = destructive_utils.schedule_rev_cmd(
                    ssh_client, cos_data_in, comp_ip, destructive.COS_JOB_IDS,
                    constants.MINUTES * 2, ifb_interface)

                # Storing the revert command with its respective interface
                rev_interfaces_dict[cos_data_in] = rev_cmd

                des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(
                    ifb_interface, DestructiveTesting.PACKET_LOSS_BLOCK)

                des_cmd = destructive_utils.get_incoming_tc_cmd(
                    cos_data_in, des_cmd, ifb_interface)

                # expected outcome after the destructive commands are run
                expected_result = {
                    DestructiveTesting.LOSS:
                    DestructiveTesting.PACKET_LOSS_BLOCK,
                    DestructiveTesting.SRC: DestructiveTesting.NETWORK
                }
                is_des_effective, error = destructive_utils.exec_des_cmd(
                    ssh_client, ifb_interface, des_cmd, expected_result)

                assert is_des_effective, error
                ifbcount += 1

            # Storing the interface-to-revert-command map for this component IP
            component_dict[comp_ip] = rev_interfaces_dict

        start_time = utils.get_formatted_time(
            constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS,
            STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 210,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        # wait for the start offset plus the block duration (30 + 90 = 120 seconds) before reverting
        time.sleep(constants.SECONDS * (start_duration + block_duration))

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        LOGGER.debug("after web service obj %s", web_service_obj)

        # executing the revert command to undo the destructive commands
        for component_ip, values in component_dict.items():
            for interface, rev_cmds in values.items():
                destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client,
                                               component_ip, interface,
                                               rev_cmds,
                                               destructive.COS_JOB_IDS)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        is_valid, error = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.INCOMPLETE],
            web_service_obj=web_service_obj,
            end_time=end_time)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_segments_threshold_storage(
            recording_id, constants.SECONDS * block_duration)
        assert is_valid, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)

    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
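
# COS nodes expose several data interfaces, so the test above allocates a distinct IFB device per
# interface (ifb0, ifb1, ...) and keeps a per-interface revert map. A small sketch of that
# allocation, assuming IFB devices are simply numbered in order; the interface names are examples.
def _example_allocate_ifb_devices(data_interfaces=("p1p1", "p1p2")):
    """Map every data interface of a node to its own IFB device name."""
    return {interface: "ifb{0}".format(index)
            for index, interface in enumerate(data_interfaces)}
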
def test_rtc9813_tc_des_006_dcm_multi_cast_ip_packet_loss_recording():
    """
    Introduce 30% packet loss on the Incoming MCE network interface throughout the recording life time,
    Validate recording state against INCOMPLETE or COMPLETE and number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)

            if mce_node[Interface.DATA_IN] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip,
                                                                      destructive.MCE_JOB_IDS, constants.MINUTES * 10)
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip, destructive.MCE_JOB_IDS, constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

            des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(DestructiveTesting.IFB_INTERFACE,
                                                                               DestructiveTesting.PACKET_LOSS)
            des_cmd = destructive_utils.get_incoming_tc_cmd(mce_data_in, des_cmd)

            # expected outcome after the destructive commands are run
            expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS,
                               DestructiveTesting.SRC: DestructiveTesting.NETWORK}
            is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, DestructiveTesting.IFB_INTERFACE,
                                                                     des_cmd, expected_result)
            assert is_des_effective, error

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE], web_service_obj=web_service_obj,
            end_time=end_time)

        assert is_valid, desc
        if is_valid and desc == RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id, VALIDATOR_TYPE=vle_validators_configuration.PLAYBACK_VALIDATION_COMPLETE)
            assert is_valid, error

        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                                                                      DestructiveTesting.IFB_INTERFACE,
                                                                      destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(ssh_client, DestructiveTesting.IFB_INTERFACE)
            assert rev_effective, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9807_tc_des_014_cos_ni_packet_loss():
    """
    Introduce 30% packet loss on the Incoming COS network interface throughout the recording life time,
    Validate recording state against INCOMPLETE or COMPLETE and number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    component_dict = {}
    ifbcount = 0
    cos_nodes = v2pc_helper.get_cos_node_data()

    try:
        for node in cos_nodes:
            comp_ip = node[Component.IP]
            # fresh revert-command map for this node's interfaces
            rev_if_dict = {}
            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=comp_ip,
                                              password=COS_PASSWORD)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper
            # with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   comp_ip,
                                                   destructive.COS_JOB_IDS)

            # the node's interface list contains only data interfaces
            for interface in node[Interface.INTERFACES]:
                cos_data_in = interface

                ifb_interface = DestructiveTesting.IFB_INTERFACE + str(
                    ifbcount)

                rev_cmd = destructive_utils.schedule_rev_cmd(
                    ssh_client, cos_data_in, comp_ip, destructive.COS_JOB_IDS,
                    constants.MINUTES * 2, ifb_interface)

                # Storing the revert command with its respective interface
                rev_if_dict[cos_data_in] = rev_cmd

                des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(
                    ifb_interface, DestructiveTesting.PACKET_LOSS)

                des_cmd = destructive_utils.get_incoming_tc_cmd(
                    cos_data_in, des_cmd, ifb_interface)

                # expected outcome after the destructive commands are run
                expected_result = {
                    DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS,
                    DestructiveTesting.SRC: DestructiveTesting.NETWORK
                }
                is_des_effective, error = destructive_utils.exec_des_cmd(
                    ssh_client, ifb_interface, des_cmd, expected_result)
                assert is_des_effective, error
                ifbcount += 1

            # Storing the interface-to-revert-command map for this component IP
            component_dict[comp_ip] = rev_if_dict

        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id,
            [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE],
            web_service_obj=web_service_obj,
            end_time=end_time)
        assert is_valid, desc

        if is_valid and desc == RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id,
                VALIDATOR_TYPE=vle_validators_configuration.
                PLAYBACK_VALIDATION_COMPLETE)
            assert is_valid, error

        # executing the revert command to undo the destructive commands
        for component_ip, values in component_dict.items():
            for interface, rev_cmds in values.items():
                destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client,
                                               component_ip, interface,
                                               rev_cmds,
                                               destructive.COS_JOB_IDS)

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)

    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
def test_rtc9814_tc_des_001_dcm_ports_profiles_block():
    """
    Block the individual incoming ports of MCE capturing a profile of the video, trigger a recording and
    verify recording is either complete or incomplete, and verify if rest of the profiles can be played back
    """
    ssh_client = None
    response = None

    stream = nsa.get_stream(STREAM_ID)
    if stream:
        profile_data = v2pc.get_stream_profile_data(stream.json()[0][constants.STREAM_NAME])
        profile_port = profile_data[Component.PORT]
        profile_bitrate = int(profile_data[Component.BITRATE])
    else:
        assert False, ValidationError.STREAM_NOT_FOUND.format(STREAM_ID)

    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)

            rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip,
                                                                  destructive.MCE_JOB_IDS, constants.MINUTES * 10)

            des_cmd = DestructiveTesting.PACKET_LOSS_PORT.format(DestructiveTesting.IFB_INTERFACE,
                                                                 DestructiveTesting.PACKET_LOSS_BLOCK, profile_port)
            des_cmd = destructive_utils.get_incoming_tc_cmd(mce_data_in, des_cmd)

            expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS_BLOCK,
                               DestructiveTesting.SRC: DestructiveTesting.NETWORK, DestructiveTesting.DPORT: profile_port}
            is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, DestructiveTesting.IFB_INTERFACE,
                                                                     des_cmd, expected_result)
            assert is_des_effective, error

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)
        recording_id = response[RecordingAttribute.RECORDING_ID]

        recording_status = [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE]
        is_valid, error = validate_recordings.validate_recording_end_state(
            recording_id, recording_status,
            web_service_obj=response[RecordingAttribute.WEB_SERVICE_OBJECT], end_time=end_time)
        assert is_valid, error

        is_valid, bitrates = utils.get_video_profiles_from_m3u8(recording_id)
        assert is_valid, bitrates
        # If Valid, bitrates will contain the list of video profiles
        assert bitrates, ValidationError.VIDEO_PROFILES_NOT_FOUND.format(recording_id)

        if profile_bitrate not in bitrates:
            assert False, ValidationError.STREAM_BITRATE_UNAVAILABLE_IN_M3U8.format(profile_bitrate, STREAM_ID)
        bitrates.remove(profile_bitrate)

        # verifying if the rest of the profiles can be played back
        playback_error = None
        if bitrates:
            for bitrate in bitrates:
                vle_request_params = {Vle.DOWNLOAD_BITRATE: bitrate}
                is_valid, playback_error = validate_recordings.validate_playback_using_vle(
                    recording_id, VLE_REQUEST_PARAMS=vle_request_params)
                if not is_valid:
                    break
        else:
            is_valid = False
            playback_error = ValidationError.BITARTES_NOT_AVAILABLE_TO_PLAYBACK

        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                                                                  DestructiveTesting.IFB_INTERFACE,
                                                                  destructive.MCE_JOB_IDS)
            assert rev_effective, error

        assert is_valid, playback_error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if response:
            response[RecordingAttribute.WEB_SERVICE_OBJECT].stop_server()
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9808_tc_des_015_cos_in_packet_latency():
    """
    Introduce packet latency of 500ms on each packet on the COS incoming network interface,
    trigger a recording and verify if playback has an acceptable latency ~500ms
    """
    ssh_client = None
    response = None
    web_service_obj = None
    component_dict = {}
    ifb_count = 0
    cos_nodes = v2pc_helper.get_cos_node_data()

    try:
        for node in cos_nodes:
            component_ip = node[Component.IP]
            # fresh revert-command map for this node's interfaces
            rev_interfaces_dict = {}
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME,
                                              component_ip=component_ip, password=COS_PASSWORD)

            # deleting the previously scheduled jobs, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, component_ip, destructive.COS_JOB_IDS)

            for interface in node[Interface.INTERFACES]:
                cos_data_in = interface

                ifb_interface = DestructiveTesting.IFB_INTERFACE + str(ifb_count)
                rev_cmd = destructive_utils.schedule_rev_cmd(ssh_client, cos_data_in, component_ip,
                                                             destructive.COS_JOB_IDS,
                                                             constants.MINUTES * 2, ifb_interface)

                rev_interfaces_dict[cos_data_in] = rev_cmd

                des_cmd = DestructiveTesting.PACKET_LATENCY_INCOMING_INTERFACE.format(ifb_interface,
                                                                                      DestructiveTesting.PACKET_LATENCY)

                des_cmd = destructive_utils.get_incoming_tc_cmd(cos_data_in, des_cmd, ifb_interface)

                # expected outcome after the destructive commands are run
                expected_result = {DestructiveTesting.DELAY: DestructiveTesting.PACKET_LATENCY,
                                   DestructiveTesting.SRC: DestructiveTesting.NETWORK}
                is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, ifb_interface,
                                                                         des_cmd, expected_result)
                assert is_des_effective, error
                ifb_count += 1
            # Storing the interface-to-revert-command map for this component IP
            component_dict[component_ip] = rev_interfaces_dict

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.COMPLETE], web_service_obj=web_service_obj,
            end_time=end_time)
        assert is_valid, desc
        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id, VALIDATOR_TYPE=vle_validators_configuration.PLAYBACK_VALIDATION_COMPLETE)
        assert is_valid, error

        # executing the revert command to undo the destructive commands
        for component_ip, values in component_dict.items():
            for interface, rev_cmds in values.items():
                destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, component_ip,  interface, rev_cmds,
                                               destructive.COS_JOB_IDS)

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9809_tc_des_004_dcm_multicast_ip_block_lifetime_recording():
    """
    Block traffic on the incoming MCE interface throughout the lifetime of recording and verify if the recording
    is failed
    """
    ssh_client = None
    response = None
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS)

            des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(
                DestructiveTesting.IFB_INTERFACE,
                DestructiveTesting.PACKET_LOSS_BLOCK)
            des_cmd = destructive_utils.get_incoming_tc_cmd(
                mce_data_in, des_cmd)

            if mce_node[Interface.DATA_IN] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(
                    ssh_client, mce_data_in, mce_ip, destructive.MCE_JOB_IDS,
                    constants.MINUTES * 10)

                expected_result = {
                    DestructiveTesting.LOSS:
                    DestructiveTesting.PACKET_LOSS_BLOCK,
                    DestructiveTesting.SRC: DestructiveTesting.NETWORK
                }
                is_des_effective, error = destructive_utils.exec_des_cmd(
                    ssh_client, DestructiveTesting.IFB_INTERFACE, des_cmd,
                    expected_result)
                assert is_des_effective, error
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS,
                                                   constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

                LOGGER.info(
                    "Executing the command=%s to cause destruction in the component",
                    des_cmd)
                ssh_client.exec_command(des_cmd)

        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        is_valid, rec_error = validate_recordings.validate_recording_end_state(
            response[RecordingAttribute.RECORDING_ID],
            [RecordingStatus.FAILED],
            web_service_obj=response[RecordingAttribute.WEB_SERVICE_OBJECT],
            end_time=end_time)

        recording_response = rio.find_recording(
            response[RecordingAttribute.RECORDING_ID]).json()
        LOGGER.debug("Recording response=%s", recording_response)

        time.sleep(constants.SECONDS * 60)
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(
                    COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                    DestructiveTesting.IFB_INTERFACE, destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(
                    ssh_client, DestructiveTesting.IFB_INTERFACE)
            assert rev_effective, error

        assert is_valid, rec_error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if response:
            response[RecordingAttribute.WEB_SERVICE_OBJECT].stop_server()
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)