def test_rtc9777_tc_arc_007_archive_playback_re_archive_unique(stream):
    """
    Create a recording with copy type as UNIQUE, wait till archival completes, playback the recording and validate
    re-archival.

    Steps:
        1. Schedule a 30s recording starting 30s from now and wait for it to complete.
        2. Wait for archival to complete; verify the copy is in ARCHIVE storage.
        3. Play the recording back; verify segments appear in RECON storage.
        4. Wait for re-archival; verify segments left RECON storage again.
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.COMPLETE)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id)
        assert is_valid, error

        # Fix: re-fetch the recording state — playback moves segments into RECON,
        # so the pre-playback response is stale for the checks below.
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.RE_ARCHIVE, Archive.COMPLETE)

        # Fix: re-fetch again after re-archival so the RECON_STORAGE check sees the
        # post-re-archive layout instead of the earlier snapshot.
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         Cos.RECORDING_NOT_STORED)
        assert is_valid, error
    finally:
        # Fix: guard cleanup — if setup failed before these were assigned, the
        # original unguarded calls raised AttributeError and masked the real failure.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
# 示例 #2 (example #2) — separator left over from extraction
def test_rtc9775_tc_arc_005_playback_archive_in_progress(stream):
    """
    Create a recording with copy type as UNIQUE and play it back when archival is in progress.

    Steps:
        1. Schedule a 60s recording and wait for it to complete.
        2. Wait until archival is IN_PROGRESS; verify ARCHIVE storage holds the
           recording fully or partially.
        3. Play the recording back while archival is still running.
        4. Verify RECON storage holds the recording fully or partially.
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.IN_PROGRESS)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         [Cos.RECORDING_STORED, Cos.RECORDING_PARTIALLY_STORED])
        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id)
        assert is_valid, error

        # If we playback recording when archival is INPROGRESS, There is a chance of few segments being played from
        # Active storage. This means that only part of the segments would move from archive to recon.
        # The rest will move from active to archive. So both PARTIALLY_STORED and STORED are valid
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         [Cos.RECORDING_STORED, Cos.RECORDING_PARTIALLY_STORED])
        assert is_valid, ValidationError.SEGMENT_NOT_MOVED.format(Cos.RECON_STORAGE, recording_id)

    finally:
        # Fix: guard cleanup — if setup failed before these were assigned, the
        # original unguarded calls raised AttributeError and masked the real failure.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
# 示例 #3 (example #3) — separator left over from extraction
def test_deleted_recording_cannot_be_played_back_ipdvrtests_166(
        common_lib, stream):
    """
    JIRA ID : IPDVRTESTS-166
    JIRA LINK : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-166

    Verify that a recording can no longer be played back after deletion.
    """
    recording = None
    web_service_obj = None

    try:
        # Step1: Create a 30 minute recording
        recording, web_service_obj = common_lib.create_recording(
            stream, rec_duration=Component.LARGE_REC_LEN_IN_SEC)
        rec_id = recording.get_entry(0).RecordingId
        ok, err = validate_recordings.validate_recording(
            rec_id, web_service_obj)
        assert ok, err

        # Step2: Delete the recording
        delete_resp = a8.delete_recording(recording)
        ok, err = validate_common.validate_http_response_status_code(
            delete_resp, requests.codes.no_content)
        assert ok, err
        ok, err = validate_recordings.validate_recording_deletion(rec_id)
        assert ok, err

        # Step 3: Validating playback — must fail for a deleted recording
        ok, err = validate_recordings.validate_playback_using_vle(rec_id)
        assert not ok, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            rec_id)

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            cleanup_resp = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         cleanup_resp.status_code)
# 示例 #4 (example #4) — separator left over from extraction
def test_ipdvrtests59_restart_workflow_no_v2pc(channel):
    """
    JIRA ID : IPDVRTESTS-59

    Disable and re-enable the standalone Capture-only (MCE) and Playback-only
    (MPE) workflows while a UNIQUE-copy recording exists, then verify the
    recording still plays back and MCE produced no core dump.

    Steps:
        1. Schedule and record a 30s UNIQUE-copy recording on *channel*.
        2. Disable the MCE workflow; verify the channel stops capturing.
        3. Enable the MCE workflow; verify the channel captures again.
        4. Disable and re-enable the MPE workflow.
        5. Validate playback of the recording and check for MCE core dumps.
    """
    recording = None
    web_service_obj = None
    # Track workflow names so the finally-block can re-enable them even if the
    # test fails mid-way with a workflow left disabled.
    mce_wf_name = mpe_wf_name = None
    try:

        start_time = utils.get_formatted_time(
            (constants.SECONDS * 30), TimeFormat.TIME_FORMAT_MS, channel)
        end_time = utils.get_formatted_time((constants.SECONDS * 60),
                                            TimeFormat.TIME_FORMAT_MS, channel)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        LOGGER.debug("Stream Id : %s", channel)

        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=channel)

        recording_id = recording.get_entry(0).RecordingId
        LOGGER.info("Recording Id :%s", recording_id)

        # Create a notification handler
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created :%s", recording.serialize())

        # Create a Recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        # Get the Capture only [MCE] workflow
        mce_wf_name = \
            CONFIG_INFO[Component.GENERIC_CONFIG][constants.Component.STANDALONE][constants.Component.WORK_FLOW][
                constants.Component.CAPTURE_ONLY_WORKFLOW]
        LOGGER.info("MCE WorkFlow Name : %s", mce_wf_name)

        # Get the Playback only [MPE] workflow
        mpe_wf_name = \
            CONFIG_INFO[Component.GENERIC_CONFIG][constants.Component.STANDALONE][constants.Component.WORK_FLOW][
                constants.Component.WORKFLOW_NAME]
        LOGGER.info("MPE WorkFlow Name : %s", mpe_wf_name)

        # resp = v2pcapi.fetch_workflow_status(mce_wf_name)
        # LOGGER.info("MCE MediaWorkflow : %s", resp.content)

        # Verify Channel Capturing status
        channel_res, response = v2pcapi.verify_channel_state(
            channel, Stream.CAPTURING)
        assert channel_res, "Channel is in %s state" % response

        # Step 1: Disable a running Capture only [MCE] workflow.
        stop_mce_result, stop_mce_resp = v2pcapi.workflow_change_state(
            mce_wf_name, MediaWorkflow.DISABLE, time_out=120)
        assert stop_mce_result, stop_mce_resp

        # Verify Channel Capturing status is stopped
        channel_res, ch_response = v2pcapi.verify_channel_state(
            channel, Stream.CAPTURING)
        assert not channel_res, ch_response

        # Step 2: Enable the workflow again
        start_mce_rslt, start_mce_resp = v2pcapi.workflow_change_state(
            mce_wf_name, MediaWorkflow.ENABLE, time_out=120)
        assert start_mce_rslt, start_mce_resp

        # Check channel goes back to Capturing state
        channel_result1, ch_resp1 = v2pcapi.verify_channel_state(
            channel, Stream.CAPTURING)
        assert channel_result1, "Channel State : %s" % ch_resp1

        # Step 2a: Restart the Playback only workflow
        # resp2 = v2pcapi.fetch_workflow_status(mpe_wf_name)
        # LOGGER.info("MPE MediaWorkflow : %s", resp2.content)

        # Disable a running Playback only [MPE] workflow.
        stop_mpe_wf_result, stop_resp2 = v2pcapi.workflow_change_state(
            mpe_wf_name, MediaWorkflow.DISABLE, time_out=120)
        assert stop_mpe_wf_result, stop_resp2

        # Enable the MPE workflow again
        start_mpe_wf_result, stop_resp3 = v2pcapi.workflow_change_state(
            mpe_wf_name, MediaWorkflow.ENABLE, time_out=120)
        assert start_mpe_wf_result, stop_resp3

        # Step 3: Validate the playback
        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id)
        assert is_valid, error

        # Check MCE CoreDump
        LOGGER.debug("Validating MCE core dump")
        is_valid, error = core_dump("mce")
        assert is_valid, error

        # Check MPE Logs and CoreDump
        # It can not be automated - Can not get the core dump for standalone installation of MPE
        # Todo: Need to update this step once the MPE team provides support.

    finally:
        # Best-effort recovery: re-enable both workflows so a failed run does
        # not leave the environment with capture/playback disabled.
        if mce_wf_name:
            v2pcapi.workflow_change_state(mce_wf_name,
                                          MediaWorkflow.ENABLE,
                                          time_out=120)
        if mpe_wf_name:
            v2pcapi.workflow_change_state(mpe_wf_name,
                                          MediaWorkflow.ENABLE,
                                          time_out=120)
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
# 示例 #5 (example #5) — separator left over from extraction
def test_rtc9763_tc_rec_027_multiple_delete_playback_unique(stream):
    """
    Create multiple recordings with copy type as UNIQUE, delete one and playback the rest.

    Steps:
        1. Schedule 3 identical UNIQUE recordings and wait for them to complete.
        2. Verify the active-storage copy count for each.
        3. Delete the first recording; verify it is gone and cannot be played.
        4. Play back the remaining recordings in parallel.
    """
    total_recordings = 3
    web_service_objs = []
    playback_pool = None
    # Fix: track the validation pool too — the original leaked it (never
    # closed/joined in the finally-block).
    recording_pool = None
    recording = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(
            total_recordings=total_recordings,
            StartTime=start_time,
            EndTime=end_time,
            StreamId=stream)

        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        recording_id_0 = recording.get_entry(0).RecordingId

        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objs[i]),
                callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recordings):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ACTIVE_STORAGE, total_recordings)
            assert is_valid, error

        recording_to_delete = copy.deepcopy(recording)
        del recording_to_delete.get_entries()[1:]
        del recording.get_entries()[:1]  # to clean up recordings later
        response = a8.delete_recording(recording_to_delete)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id_0)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id_0)
        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            recording_id_0)

        playback_pool = mp_pool.ThreadPool(
            processes=len(recording.get_entries()))
        for recording_entry in recording.get_entries():
            playback_pool.apply_async(
                validate_recordings.validate_playback_using_vle,
                (recording_entry.RecordingId, ),
                callback=queue.put)

        for i in range(len(recording.get_entries())):
            is_valid, error = queue.get()
            assert is_valid, error
    finally:
        # Fix: shut down BOTH thread pools (recording_pool was leaked before).
        for pool in (recording_pool, playback_pool):
            if pool:
                pool.close()
                pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
# 示例 #6 (example #6) — separator left over from extraction
def test_tc_arc_(name, stream, event):
    """
    test_tc_arc_[009_multiple_playback_one_unique-ARCHIVE -->    Create multiple recordings with copy type as UNIQUE, wait for them to get archived, playback one of the them and
    verify if the recording is present both in the ARCHIVE_STORAGE AND RECON_STORAGE
    test_tc_arc_[011_multiple_playback_one_re_archive_unique-RE_ARCHIVE -->  Create multiple recordings with copy type as UNIQUE, wait for them to get archived, playback one, and wait till it
    gets re-archived
    """
    LOGGER.info("#####################################################################")
    LOGGER.info("Starting test_tc_arc_%s", name)

    total_recordings = 3
    recording = None
    web_service_objs = []
    recording_pool = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recordings, StartTime=start_time, EndTime=end_time,
                                              StreamId=stream)
        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        recording_id = recording.get_entry(0).RecordingId

        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_recording,
                                       (recording.get_entry(i).RecordingId, web_service_objs[i]), callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.COMPLETE)

        for i in range(total_recordings):
            response = rio.find_recording(recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                             Cos.RECORDING_NOT_STORED, i)
            assert is_valid, error

            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)
            assert is_valid, error

            is_valid, error = validate_storage.validate_copy_count(response, Cos.ARCHIVE_STORAGE)
            assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id)
        assert is_valid, error

        # Playback moves segments, so fetch a fresh view of the recording before
        # validating storage again.
        response = rio.find_recording(recording.get_entry(0).RecordingId).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        if event == Archive.RE_ARCHIVE:
            archive_helper.wait_for_archival(stream, recording_id, Archive.RE_ARCHIVE, Archive.COMPLETE)
            # Fix: re-fetch after re-archival — the earlier response is stale and
            # still shows the pre-re-archive RECON layout.
            response = rio.find_recording(recording_id).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                             Cos.RECORDING_NOT_STORED)
            assert is_valid, error

            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)
            assert is_valid, error

        # Validate playback of all the recordings in parallel
        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_playback,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()

        # Fix: guard the delete — recording is None if construction failed, and the
        # original unguarded call would mask the real test failure.
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_rec_038_013_active_asset(stream, name, total_recording):
    """
    Bulk deletion of assets in Active(20 requests).

    Steps:
        1. Schedule *total_recording* COMMON-copy recordings and wait for them.
        2. Verify each recording is stored in ACTIVE storage.
        3. Bulk-delete all of them.
        4. Verify each is deleted and can no longer be played back.
    """
    recording = None
    web_service_objects = []
    recording_pool = None
    recording_id_list = []

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(total_recordings=total_recording,
                                              StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_id_list.append(recording_id)
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        recording_pool = mp_pool.ThreadPool(processes=total_recording)
        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objects[i]),
                callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

        # Bulk delete while the assets are still in Active storage
        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_recording_deletion(
                recording_id_list[i])
            assert is_valid, error

            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id_list[i])
            assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
                recording_id_list[i])

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            web_service_obj.stop_server()
        # Fix: guard the delete — recording is None if construction failed, and the
        # original unguarded call would mask the real test failure.
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9760_tc_rec_014_delete_playback_in_progress_recording(stream):
    """
    Delete a recording while the playback is in progress and verify the playback.

    Steps:
        1. Schedule a 120s recording and wait for it to complete.
        2. Start playback asynchronously, then delete the recording mid-playback.
        3. Verify deletion, and that a fresh playback attempt fails even if the
           in-flight playback happened to succeed.
    """
    recording = None
    playback_pool = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 150,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        queue = Queue.Queue()

        # Kick off playback in the background so the delete below lands while
        # playback is still in progress.
        playback_pool = mp_pool.ThreadPool(processes=1)
        playback_pool.apply_async(
            validate_recordings.validate_playback_using_vle, (recording_id, ),
            callback=queue.put)

        time.sleep(10 * constants.SECONDS)
        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id)
        assert is_valid, error

        is_valid, error = queue.get()

        if is_valid:
            # verifying whether the playback fails now, even though it succeeded previously
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id)

        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            recording_id)
    finally:
        if playback_pool:
            playback_pool.close()
            playback_pool.join()
        # Fix: guard cleanup — if setup failed before these were assigned, the
        # original unguarded calls raised AttributeError and masked the real failure.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_er_006_007_incomplete_delete_playback_US62461(stream, name, copy_type):
    """
    Create multiple recordings with copy type as COMMON, force them INCOMPLETE by
    restarting the segment recorder, delete all but one, and verify the remaining
    one still plays back after archival.
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    # Fix: pre-initialise so the finally-block cannot hit NameError when the
    # try-block fails before this is assigned.
    rec_with_diff_time = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10
    service_name = V2pc.MANIFEST_AGENT
    namespace = Component.VMR

    try:
        # Taking backup of v2pc pod config info, editing the config and then restarting the services
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(service_name, namespace, "4")
        assert is_valid, error

        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 160, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)
        # Append 10 more recordings, each with a slightly different start/end time
        for i in range(diff_start_time_recordings, same_start_time_recordings+diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * 30) + i, TimeFormat.TIME_FORMAT_MS, stream)
            end_time = utils.get_formatted_time((constants.SECONDS * 160) + i, TimeFormat.TIME_FORMAT_MS, stream)
            rec_with_diff_time = recording_model.Recording(total_recordings=1, StartTime=start_time, EndTime=end_time,
                                                           copyType=copy_type, StreamId=stream)
            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                                                        rec_with_diff_time.RequestId + '_' + str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        # Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        # Fix: the original "return False, ..." silently PASSED the test on an
        # empty response; a pytest test must fail via assert.
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restarting segment recorder to make INCOMPLETE recording
        is_valid, error = delete_vmr_pods(V2pc.SEGMENT_RECORDER)
        assert is_valid, error

        # Verifying recording INCOMPLETE STATE
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_pool.apply_async(validate_recordings.validate_recording_end_state,
                                       (recording_id, [constants.RecordingStatus.INCOMPLETE]),
                                       dict(web_service_obj=web_service_objects[i], end_time=end_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Keep only the last recording out of the bulk delete
        recording.Entries.pop(-1)

        # Deleting all but one recording
        response = a8.delete_recording(recording)

        # Wait for garbage collect
        archive_helper.wait_for_archival(stream, recording.get_entry(-1).RecordingId, Archive.ARCHIVE, Archive.COMPLETE)

        # Verifying playback of the remaining recording
        is_valid, error = validate_recordings.validate_playback_using_vle(rec_with_diff_time.Entries[0].RecordingId,)
        assert is_valid, error

    finally:
        # Revert back the v2pc config changes
        # NOTE(review): asserting here can mask an exception raised in the
        # try-block — kept for consistency with the original behavior.
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        # Fix: guard on the object actually deleted — the original checked
        # `recording` but deleted `rec_with_diff_time`, which could be unbound/None.
        if rec_with_diff_time:
            response = a8.delete_recording(rec_with_diff_time)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
# 示例 #10 (example #10) — separator left over from extraction
def test_tc_arc(name, stream, archive_playback):
    """
    Schedule recording with same start time, end time with unique copy,
    playback recording single recording and delete it, finally playback remaining records.

    :param name: test-case label (not referenced by the body)
    :param stream: stream id to record from
    :param archive_playback: when truthy, additionally validate archive-storage
        state and playback of the archived copies
    """
    total_recordings = 3
    recording = None
    web_service_objs = []
    recording_pool = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        recording = recording_model.Recording(
            total_recordings=total_recordings,
            StartTime=start_time,
            EndTime=end_time,
            copyType=copy_type,
            StreamId=stream)
        # One notification web service per recording entry so each recording's
        # state transitions can be observed independently.
        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()
            LOGGER.debug("Recording=%s, UpdateURL=%s", recording_id,
                         web_service_objs[i].get_url())

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        # Validate all recordings concurrently; results are funneled through
        # the queue by the apply_async callback.
        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objs[i]),
                callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error
        # Validating the copy count of unique copy recording in active storage
        for i in range(total_recordings):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ACTIVE_STORAGE, total_recordings)
            assert is_valid, error

        # Wait till archival completes
        recording_id_0 = recording.get_entry(0).RecordingId
        archive_helper.wait_for_archival(stream, recording_id_0,
                                         Archive.ARCHIVE, Archive.COMPLETE)
        # Validating copy count in archive storage after archival duration:
        # segments must have moved out of active storage into archive, with a
        # single copy per recording.
        for i in range(total_recordings):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED)

            assert is_valid, error

            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)

            assert is_valid, error

            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ARCHIVE_STORAGE, 1)
            assert is_valid, error

        if archive_playback:
            response = rio.find_recording(recording_id_0).json()
            # Validating the first recording for playback
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id_0)
            assert is_valid, error

            # Validating the copy of segments in the archive folder after playback
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

            # Validating the segments in recon folder after playback
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.RECON_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

        # Delete only the first recording; the rest must remain playable.
        response = a8.delete_recording(recording,
                                       recording.get_entry(0).RecordingId)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id_0)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id_0)

        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            recording_id_0)

        # Drop the deleted entry from the local model before checking survivors.
        del recording.get_entries()[:1]
        # Check remaining records still in archive
        if archive_playback:
            for recording_entry in recording.get_entries():
                response = rio.find_recording(
                    recording_entry.RecordingId).json()
                recording_pool.apply_async(
                    validate_storage.validate_recording_in_storage,
                    (response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED),
                    callback=queue.put)

            for i in range(len(recording.get_entries())):
                is_valid, error = queue.get()
                assert is_valid, error

        for recording_entry in recording.get_entries():
            recording_pool.apply_async(
                validate_recordings.validate_playback_using_vle,
                (recording_entry.RecordingId, ),
                callback=queue.put)

        for i in range(len(recording.get_entries())):
            is_valid, error = queue.get()
            assert is_valid, error
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objs:
            if web_service_obj:
                web_service_obj.stop_server()
        # Guard the cleanup: if setup failed before the Recording instance was
        # created, delete_recording(None) would raise here and mask the
        # original failure (matches the cleanup style of the sibling tests).
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
# Example #11
def test_ipdvrtests59_restart_workflow(channel):
    """
    Disable and re-enable the media workflow while a recording exists, then
    confirm the channel returns to the capturing state, the recording plays
    back, and neither MCE nor MPE produced a core dump.
    """
    rec = None
    notify_handler = None
    workflow_name = None
    try:
        # Schedule a unique-copy recording 30s out with a 30s duration.
        rec_start = utils.get_formatted_time(
            (constants.SECONDS * 30), TimeFormat.TIME_FORMAT_MS, channel)
        rec_end = utils.get_formatted_time((constants.SECONDS * 60),
                                           TimeFormat.TIME_FORMAT_MS, channel)
        LOGGER.debug("Stream Id : %s", channel)

        rec = recording_model.Recording(
            StartTime=rec_start,
            EndTime=rec_end,
            copyType=RecordingAttribute.COPY_TYPE_UNIQUE,
            StreamId=channel)

        rec_id = rec.get_entry(0).RecordingId
        LOGGER.info("Recording Id :%s", rec_id)

        # Create a notification handler
        notify_handler = notification_utils.get_web_service_object(rec_id)
        rec.get_entry(0).UpdateUrl = notify_handler.get_url()
        LOGGER.debug("Recording instance created :%s", rec.serialize())

        # Create a Recording
        create_resp = a8.create_recording(rec)
        ok, err = validate_common.validate_http_response_status_code(
            create_resp, requests.codes.no_content)
        assert ok, err

        ok, err = validate_recordings.validate_recording(rec_id,
                                                         notify_handler)
        assert ok, err

        # Get the workflow
        workflow_name = CONFIG_INFO[Component.GENERIC_CONFIG][
            constants.Component.V2PC][constants.Component.WORK_FLOW][
                constants.Component.WORKFLOW_NAME]
        LOGGER.info("WorkFlow Name : %s", workflow_name)
        wf_status = v2pcapi.fetch_workflow_status(workflow_name)
        LOGGER.info("Media Workflow before disable and enable: %s",
                    wf_status.content)

        # Verify Channel Capturing status
        capturing, state_resp = v2pcapi.verify_channel_state(
            channel, Stream.CAPTURING)
        assert capturing, state_resp

        # Step 1: Disable a running workflow.
        disabled, disable_resp = v2pcapi.workflow_change_state(
            workflow_name, MediaWorkflow.DISABLE, time_out=120)
        assert disabled, disable_resp

        # Verify Channel Capturing status is stopped
        capturing, state_resp = v2pcapi.verify_channel_state(
            channel, Stream.CAPTURING)
        assert not capturing, state_resp

        # Step 2: Enable the workflow again
        enabled, enable_resp = v2pcapi.workflow_change_state(
            workflow_name, MediaWorkflow.ENABLE, time_out=120)
        assert enabled, enable_resp

        # Check channel goes back to Capturing state
        capturing, ch_resp = v2pcapi.verify_channel_state(
            channel, Stream.CAPTURING)
        assert capturing, "Channel State : %s" % ch_resp

        # Step 3: Validate the playback
        ok, err = validate_recordings.validate_playback_using_vle(rec_id)
        assert ok, err

        # Check MCE CoreDump
        LOGGER.debug("Validating MCE core dump")
        ok, err = core_dump("mce")
        assert ok, err

        # Check MPE Logs and CoreDump
        LOGGER.debug("Validating MPE core dump")
        ok, err = core_dump("mpe")
        assert ok, err

    finally:
        # Always try to leave the workflow enabled for later tests.
        if workflow_name:
            v2pcapi.workflow_change_state(workflow_name,
                                          MediaWorkflow.ENABLE,
                                          time_out=120)
        if notify_handler:
            notify_handler.stop_server()
        if rec:
            cleanup_resp = a8.delete_recording(rec)
            LOGGER.debug("Recording clean up status code=%s",
                         cleanup_resp.status_code)
def test_rtc9758_tc_rec_012_IPDVRTESTS_52_delete_and_playback_after_recording_start(
        recording_start_delay, stream):
    """
    Create a recording, delete and play it back after it starts.

    :param recording_start_delay: seconds from now until the recording starts
    :param stream: stream id to record from
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(
            constants.SECONDS * recording_start_delay,
            TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        recording_id = recording.get_entry(0).RecordingId
        response = rio.find_recording(recording_id).json()
        LOGGER.debug("Response=%s", response)
        # Server-side start time is authoritative; trailing 'Z' is stripped
        # before parsing.
        start_time = utils.get_parsed_time(
            response[0][RecordingAttribute.START_TIME][:-1])
        current_time = datetime.datetime.utcnow()

        # wait till the recording start time
        if current_time < start_time:
            utils.wait(start_time - current_time, constants.TIME_DELTA)
        is_valid, error = validate_recordings.validate_notification(
            web_service_obj, constants.RecordingStatus.STARTED)

        assert is_valid, error

        # Let the recording capture some content before deleting it mid-flight.
        time.sleep(15 * constants.SECONDS)
        delete_time = utils.get_formatted_time(constants.SECONDS * 0,
                                               TimeFormat.TIME_FORMAT_MS,
                                               stream)
        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id)

        assert is_valid, error

        # Deleted recording must NOT be playable.
        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id, EndTime=delete_time)

        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            recording_id)
    finally:
        # Guard both cleanups: a failure before the recording/web service was
        # created would otherwise raise AttributeError here and mask the
        # original test failure (sibling tests already guard this way).
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
# Example #13
def test_rtc9813_tc_des_006_dcm_multi_cast_ip_packet_loss_recording():
    """
    Introduce 30% packet loss on the Incoming MCE network interface throughout the recording life time,
    Validate recording state against INCOMPLETE or COMPLETE and number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)

            # When data-in is a dedicated interface, keep the revert command so
            # we can undo explicitly; otherwise rely on the scheduled auto-revert.
            if mce_node[Interface.DATA_IN] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip,
                                                                      destructive.MCE_JOB_IDS, constants.MINUTES * 10)
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip, destructive.MCE_JOB_IDS, constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

            des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(DestructiveTesting.IFB_INTERFACE,
                                                                               DestructiveTesting.PACKET_LOSS)
            des_cmd = destructive_utils.get_incoming_tc_cmd(mce_data_in, des_cmd)

            # expected outcome after the destructive commands are run
            expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS,
                               DestructiveTesting.SRC: DestructiveTesting.NETWORK}
            is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, DestructiveTesting.IFB_INTERFACE,
                                                                     des_cmd, expected_result)
            assert is_des_effective, error

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE], web_service_obj=web_service_obj,
            end_time=end_time)

        assert is_valid, desc
        if is_valid and desc == RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id, VALIDATOR_TYPE=vle_validators_configuration.PLAYBACK_VALIDATION_COMPLETE)
            assert is_valid, error

        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            # BUGFIX: re-bind mce_ip per node; previously the stale value from
            # the setup loop was used, so only the last node was ever reverted.
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip,
                                                                      DestructiveTesting.IFB_INTERFACE, rev_cmds[mce_ip],
                                                                      destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(ssh_client, DestructiveTesting.IFB_INTERFACE)
            assert rev_effective, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
# Example #14
def test_delete_hot_recording_ipdvrtests_52(stream):
    """
    JIRA ID : IPDVRTESTS-52
    TITLE   : "Delete Rec(Hot recording)"
    STEPS   : Create a 30 min recording, when program is already started, mark the recording for deletion.
              Check memsql to verify erase time is set.
              Check cos http logs for delete segments.
              Playback should not be possible after marking it for delete.
    """
    rec = None
    notifier = None

    try:
        # Build and register an in-progress ("hot") recording.
        duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        sched_start = utils.get_formatted_time(constants.SECONDS, TimeFormat.TIME_FORMAT_MS, stream)
        sched_end = utils.get_formatted_time((constants.SECONDS * duration),
                                             TimeFormat.TIME_FORMAT_MS, stream)
        rec = recording_model.Recording(StartTime=sched_start, EndTime=sched_end, StreamId=stream)
        rec_id = rec.get_entry(0).RecordingId
        notifier = notification_utils.get_web_service_object(rec_id)
        rec.get_entry(0).UpdateUrl = notifier.get_url()

        LOGGER.debug("Recording instance created=%s", rec.serialize())
        create_resp = a8.create_recording(rec)
        ok, err = validate_common.validate_http_response_status_code(create_resp, requests.codes.no_content)

        assert ok, err

        rec_id = rec.get_entry(0).RecordingId
        rio_resp = rio.find_recording(rec_id).json()
        LOGGER.debug("Response=%s", rio_resp)
        actual_start = utils.get_parsed_time(rio_resp[0][RecordingAttribute.START_TIME][:-1])
        now = datetime.datetime.utcnow()

        # Block until the recording actually begins.
        if now < actual_start:
            utils.wait(actual_start - now, constants.TIME_DELTA)
        ok, err = validate_recordings.validate_notification(notifier, constants.RecordingStatus.STARTED)

        assert ok, err

        # Give the recording time to capture segments, then delete it hot.
        time.sleep(15 * constants.SECONDS)
        deletion_mark = utils.get_formatted_time(constants.SECONDS * 0, TimeFormat.TIME_FORMAT_MS, stream)
        delete_resp = a8.delete_recording(rec)
        ok, err = validate_common.validate_http_response_status_code(delete_resp, requests.codes.no_content)

        assert ok, err

        ok, err = validate_recordings.validate_recording_deletion(rec_id)

        assert ok, err
        rio_resp = rio.find_recording(rec_id).json()
        ok, err = validate_storage.validate_recording_in_storage(rio_resp, Cos.ACTIVE_STORAGE,
                                                                 Cos.RECORDING_NOT_STORED)
        assert ok, err

        # Playback of a deleted recording must fail.
        ok, err = validate_recordings.validate_playback_using_vle(rec_id, EndTime=deletion_mark)

        assert not ok, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(rec_id)
    finally:
        if notifier:
            notifier.stop_server()
        if rec:
            cleanup_resp = a8.delete_recording(rec)
            LOGGER.debug("Recording clean up status code=%s", cleanup_resp.status_code)
# Example #15
def test_rtc9807_tc_des_014_cos_ni_packet_loss():
    """
    Introduce 30% packet loss on the Incoming COS network interface throughout the recording life time,
    Validate recording state against INCOMPLETE or COMPLETE and number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    component_dict = {}
    ifbcount = 0
    cos_nodes = v2pc_helper.get_cos_node_data()

    try:
        for node in cos_nodes:
            comp_ip = node[Component.IP]
            # BUGFIX: build a fresh interface->revert-command mapping per node.
            # Previously a single shared dict was stored for every component,
            # so each component appeared to own all interfaces' revert commands.
            rev_if_dict = {}
            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=comp_ip,
                                              password=COS_PASSWORD)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper
            # with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   comp_ip,
                                                   destructive.COS_JOB_IDS)

            # interface takes only data interfaces
            for interface in node[Interface.INTERFACES]:
                cos_data_in = interface

                # A distinct ifb device per data interface across all nodes.
                ifb_interface = DestructiveTesting.IFB_INTERFACE + str(
                    ifbcount)

                rev_cmd = destructive_utils.schedule_rev_cmd(
                    ssh_client, cos_data_in, comp_ip, destructive.COS_JOB_IDS,
                    constants.MINUTES * 2, ifb_interface)

                # Storing the revert command with its respective interface
                rev_if_dict[cos_data_in] = rev_cmd

                des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(
                    ifb_interface, DestructiveTesting.PACKET_LOSS)

                des_cmd = destructive_utils.get_incoming_tc_cmd(
                    cos_data_in, des_cmd, ifb_interface)

                # expected outcome after the destructive commands are run
                expected_result = {
                    DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS,
                    DestructiveTesting.SRC: DestructiveTesting.NETWORK
                }
                is_des_effective, error = destructive_utils.exec_des_cmd(
                    ssh_client, ifb_interface, des_cmd, expected_result)
                assert is_des_effective, error
                ifbcount += 1

            # Storing Interfaces and Revert Command with its respective Component IP
            component_dict[comp_ip] = rev_if_dict

        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id,
            [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE],
            web_service_obj=web_service_obj,
            end_time=end_time)
        assert is_valid, desc

        if is_valid and desc == RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id,
                VALIDATOR_TYPE=vle_validators_configuration.
                PLAYBACK_VALIDATION_COMPLETE)
            assert is_valid, error

        # executing the revert command to undo the destructive commands
        for component_ip, values in component_dict.items():
            for interface, rev_cmds in values.items():
                destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client,
                                               component_ip, interface,
                                               rev_cmds,
                                               destructive.COS_JOB_IDS)

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)

    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
# Example #16
def test_rtc9814_tc_des_001_dcm_ports_profiles_block():
    """
    Block the individual incoming ports of MCE capturing a profile of the video, trigger a recording and
    verify recording is either complete or incomplete, and verify if rest of the profiles can be played back
    """
    ssh_client = None
    response = None

    stream = nsa.get_stream(STREAM_ID)
    if stream:
        profile_data = v2pc.get_stream_profile_data(stream.json()[0][constants.STREAM_NAME])
        profile_port = profile_data[Component.PORT]
        profile_bitrate = int(profile_data[Component.BITRATE])
    else:
        assert False, ValidationError.STREAM_NOT_FOUND.format(STREAM_ID)

    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)

            rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip,
                                                                  destructive.MCE_JOB_IDS, constants.MINUTES * 10)

            # Block only the port carrying the selected profile.
            des_cmd = DestructiveTesting.PACKET_LOSS_PORT.format(DestructiveTesting.IFB_INTERFACE,
                                                                 DestructiveTesting.PACKET_LOSS_BLOCK, profile_port)
            des_cmd = destructive_utils.get_incoming_tc_cmd(mce_data_in, des_cmd)

            expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS_BLOCK,
                               DestructiveTesting.SRC: DestructiveTesting.NETWORK, DestructiveTesting.DPORT: profile_port}
            is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, DestructiveTesting.IFB_INTERFACE,
                                                                     des_cmd, expected_result)
            assert is_des_effective, error

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)
        recording_id = response[RecordingAttribute.RECORDING_ID]

        recording_status = [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE]
        is_valid, error = validate_recordings.validate_recording_end_state(recording_id,
                                                                           recording_status, web_service_obj=response[
                                                                           RecordingAttribute.WEB_SERVICE_OBJECT],
                                                                           end_time=end_time)
        assert is_valid, error

        is_valid, bitrates = utils.get_video_profiles_from_m3u8(recording_id)
        assert is_valid, bitrates
        # If Valid, bitrates will contain the list of video profiles
        assert bitrates, ValidationError.VIDEO_PROFILES_NOT_FOUND.format(recording_id)

        if profile_bitrate not in bitrates:
            assert False, ValidationError.STREAM_BITRATE_UNAVAILABLE_IN_M3U8.format(profile_bitrate, STREAM_ID)
        bitrates.remove(profile_bitrate)

        # verifying if the rest of the profiles can be played back
        playback_error = None
        if bitrates:
            for bitrate in bitrates:
                vle_request_params = {Vle.DOWNLOAD_BITRATE: bitrate}
                is_valid, playback_error = validate_recordings.validate_playback_using_vle(
                    recording_id, VLE_REQUEST_PARAMS=vle_request_params)
                if not is_valid:
                    break
        else:
            is_valid = False
            playback_error = ValidationError.BITARTES_NOT_AVAILABLE_TO_PLAYBACK

        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            # BUGFIX: pass the interface before the revert command, matching
            # the exec_rev_cmd call sites in the other destructive tests;
            # the arguments were previously swapped.
            rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip,
                                                                  DestructiveTesting.IFB_INTERFACE, rev_cmds[mce_ip],
                                                                  destructive.MCE_JOB_IDS)
            assert rev_effective, error

        assert is_valid, playback_error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if response:
            response[RecordingAttribute.WEB_SERVICE_OBJECT].stop_server()
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9808_tc_des_015_cos_in_packet_latency():
    """
    Introduce packet latency of 500ms on each packet on the COS incoming network interface,
    trigger a recording and verify if playback has an acceptable latency ~500ms.

    Flow: schedule revert jobs and apply tc/netem latency on every interface of every COS
    node, record and validate playback under that latency, revert all interfaces, then run
    a sanity recording test to confirm the setup recovered.
    """
    # component_ip -> SSH client; kept per node so every connection is closed in the
    # finally block and each node's revert commands run through its OWN client
    # (previously only the last node's client was closed/used).
    ssh_clients = {}
    response = None
    web_service_obj = None
    component_dict = {}
    ifb_count = 0
    cos_nodes = v2pc_helper.get_cos_node_data()

    try:
        for node in cos_nodes:
            component_ip = node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME,
                                              component_ip=component_ip, password=COS_PASSWORD)
            ssh_clients[component_ip] = ssh_client

            # deleting the previously scheduled jobs, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, component_ip, destructive.COS_JOB_IDS)

            # Fresh dict per node: a single dict shared across iterations would make every
            # component_dict entry alias the same object and accumulate ALL nodes'
            # interfaces, so each node's revert pass would replay other nodes' commands.
            rev_interfaces_dict = {}
            for interface in node[Interface.INTERFACES]:
                cos_data_in = interface

                # one ifb device per impaired interface, numbered globally across nodes
                ifb_interface = DestructiveTesting.IFB_INTERFACE + str(ifb_count)
                rev_cmd = destructive_utils.schedule_rev_cmd(ssh_client, cos_data_in, component_ip,
                                                             destructive.COS_JOB_IDS,
                                                             constants.MINUTES * 2, ifb_interface)

                rev_interfaces_dict[cos_data_in] = rev_cmd

                des_cmd = DestructiveTesting.PACKET_LATENCY_INCOMING_INTERFACE.format(ifb_interface,
                                                                                      DestructiveTesting.PACKET_LATENCY)

                des_cmd = destructive_utils.get_incoming_tc_cmd(cos_data_in, des_cmd, ifb_interface)

                # expected outcome after the destructive commands are run
                expected_result = {DestructiveTesting.DELAY: DestructiveTesting.PACKET_LATENCY,
                                   DestructiveTesting.SRC: DestructiveTesting.NETWORK}
                is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, ifb_interface,
                                                                         des_cmd, expected_result)
                assert is_des_effective, error
                ifb_count += 1
            # Adding component ip and its own interface->revert-cmd map to the dictionary
            component_dict[component_ip] = rev_interfaces_dict

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.COMPLETE], web_service_obj=web_service_obj,
            end_time=end_time)
        assert is_valid, desc
        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id, VALIDATOR_TYPE=vle_validators_configuration.PLAYBACK_VALIDATION_COMPLETE)
        assert is_valid, error

        # executing the revert command to undo the destructive commands, each node
        # through the SSH client that was opened for it
        # NOTE(review): the revert result is deliberately not asserted here, matching
        # the original behavior — confirm whether a failed revert should fail the test.
        for component_ip, interface_rev_cmds in component_dict.items():
            for cos_interface, rev_cmd in interface_rev_cmds.items():
                destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_clients[component_ip], component_ip,
                                               cos_interface, rev_cmd, destructive.COS_JOB_IDS)

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        # close every node's SSH connection, not just the last one opened
        for client in ssh_clients.values():
            client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)