Example #1
def test_rtc9734_tc_er_010_playback_redirect_mode(stream, name, copy_type):
    """
    Playback of an Active and an Archive recording (UNIQUE and COMMON) with DO (Dash Origin) redirect enabled
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream,
                                              copyType=copy_type)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error

        if copy_type == RecordingAttribute.COPY_TYPE_UNIQUE:
            archive_helper.wait_for_archival(stream, recording_id,
                                             Archive.ARCHIVE,
                                             Archive.IN_PROGRESS)
            response = rio.find_recording(recording_id).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)

            assert is_valid, error

            is_valid, error = validate_recordings.validate_playback(
                recording_id)
            assert is_valid, error

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
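These tests receive stream, name, and copy_type from the surrounding pytest setup, which is not part of this snippet. A minimal sketch of how that parametrization could look; the fixture name, ids, and literal copy-type values below are assumptions, not the project's actual configuration:

# Hypothetical conftest.py sketch -- not part of the original suite.
import pytest

@pytest.fixture
def stream():
    # The real suite would resolve this from its test-bed configuration.
    return "1234"

def pytest_generate_tests(metafunc):
    # Feed both copy types to any test that asks for name/copy_type.
    if "name" in metafunc.fixturenames and "copy_type" in metafunc.fixturenames:
        metafunc.parametrize("name, copy_type",
                             [("unique", "UNIQUE"), ("common", "COMMON")])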
def test_create_3hr_recording_ipdvrtests_64(channel, name, copy_type):
    """
    JIRA_URL : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-64
    DESCRIPTION : Create 3 hr recording
    #Partially automated
        Skipped step(s) : Step 3 - No errors seen in MA/SR pods
    """
    stream = channel
    web_service_obj = None
    recording = None
    stream_name = nsa.get_stream(stream).json()[0][constants.STREAM_NAME]
    try:
        #STEP 1 - Create UC and CC recording with longer duration ~3hr
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 10830, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)

        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        #STEP 2 - Verify recording state is complete
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        #Find recording
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio: %s", response[0]['Status'])

        #Playback and Validate Playback
        LOGGER.info("Playback Recording")
        print "\nPlayback Recording"
        print "Recording ID :",recording.get_entry(0).RecordingId
        is_valid, error = validate_recordings.validate_playback(recording.get_entry(0).RecordingId)
        print "[INFO: ] ",is_valid
        assert is_valid, error

        #STEP 4 - memsql table has correct info for UC and CC
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio:")
        pprint(response, width=1)

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9762_tc_rec_016_multiple_unique(stream):
    """
    Create multiple recordings with copy type as UNIQUE
    """
    total_recordings = 3
    web_service_objs = []
    recording_pool = None
    recording = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recordings, StartTime=start_time, EndTime=end_time,
                                              StreamId=stream)

        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_recording,
                                       (recording.get_entry(i).RecordingId, web_service_objs[i]), callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recordings):
            response = rio.find_recording(recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                             Cos.RECORDING_STORED, i)

            assert is_valid, error

        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_playback,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9777_tc_arc_007_archive_playback_re_archive_unique(stream):
    """
    Create a recording with copy type as UNIQUE, wait till archival completes, playback the recording and validate
    re-archival
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)

        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.COMPLETE)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id)

        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.RE_ARCHIVE, Archive.COMPLETE)

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         Cos.RECORDING_NOT_STORED)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #5
def test_tc_des_016_vmr_ntp_out_of_sync():
    LOGGER.info("vmr_ntp_out_of_sync test case...")
    recording = None
    web_service_obj = None
    timeout = int(os.environ.get(STREAM_ID))

    ntp_server_v2pc = CONFIG_INFO[Component.V2PC][Component.NTP]
    india_ntp = '1.in.pool.ntp.org'
    ntp_synchronization_time = 300

    # Initiate the recording
    try:
        start_time = utils.get_formatted_time((constants.SECONDS * 30) + timeout, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time((constants.SECONDS * 90) + timeout, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=STREAM_ID)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        time.sleep(40)

        # Point VMR at a different NTP server so its clock drifts out of sync
        cmd = "sed -i -E 's/" + ntp_server_v2pc + "/" + india_ntp + "/g' /etc/ntp.conf"
        update_vmr_ntp_server(cmd)
        time.sleep(ntp_synchronization_time)

        # Validate recording is incomplete
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        LOGGER.info(is_valid)
        LOGGER.info(error)
        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)

        assert is_valid, error
    finally:
        # revert the ntp server to the original value in vmr
        cmd = "sed -i -E 's/" + india_ntp + "/" + ntp_server_v2pc + "/g' /etc/ntp.conf"
        update_vmr_ntp_server(cmd)
        time.sleep(ntp_synchronization_time)
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
        # Check whether all the components are synchronized.
        times_are_synchronized, error = test_setup.are_times_synchronized()
        if not times_are_synchronized:
            pytest.fail(error)
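update_vmr_ntp_server is used above but not defined in this snippet. A minimal sketch of what such a helper might do, assuming SSH access to the VMR host; the host, credentials, service name, and the use of paramiko are all assumptions:

# Hypothetical sketch -- the real update_vmr_ntp_server is not shown here.
import paramiko

VMR_HOST = "vmr.example.local"   # placeholder
VMR_USER = "root"                # placeholder
VMR_PASSWORD = "password"        # placeholder

def update_vmr_ntp_server(cmd):
    """Run an ntp.conf edit on the VMR host and restart the NTP daemon."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(VMR_HOST, username=VMR_USER, password=VMR_PASSWORD)
    try:
        client.exec_command(cmd)
        client.exec_command("service ntpd restart")
    finally:
        client.close()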
Example #6
def test_rtc9750_tc_rec_002_future_start_time_future_end_time_common(stream):
    """
    Create a recording with future start time, future end time and COMMON copy type
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)

        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED, 1)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)

        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #7
def test_rtc9744_tc_er_018_private_copy(stream, name, copy_type):
    """
    Private copy - Archive/Pre-gen
    """

    web_service_obj = None
    recording = None
    stream = str(private_copy_stream)

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 80,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        # wait for archival (garbage collection) to complete
        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE,
                                         Archive.COMPLETE)

        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
        assert is_valid, error

    finally:
        if web_service_obj:
            web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
def test_rtc9773_tc_arc_003_common(stream):
    """
    Create a recording with copy type as COMMON and check whether archival is happening or not
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream,
                                              copyType=copy_type)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)

        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE,
                                         Archive.IN_PROGRESS)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)

        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #9
def test_rtc9775_tc_arc_005_playback_archive_in_progress(stream):
    """
    Create a recording with copy type as UNIQUE and play it back when archival is in progress
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)

        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.IN_PROGRESS)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         [Cos.RECORDING_STORED, Cos.RECORDING_PARTIALLY_STORED])

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id)

        assert is_valid, error

        # If we playback recording when archival is INPROGRESS, There is a chance of few segments being played from
        # Active storage. This means that only part of the segments would move from archive to recon.
        # The rest will move from active to archive. So both PARTIALLY_STORED and STORED are valid
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         [Cos.RECORDING_STORED, Cos.RECORDING_PARTIALLY_STORED])

        assert is_valid, ValidationError.SEGMENT_NOT_MOVED.format(Cos.RECON_STORAGE, recording_id)

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #10
def test_rtc9756_tc_rec_010_recording_stop_after_recording_start(stream):
    """
    Create a recording and once it starts, stop it by setting the end time as the current time
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 150, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        LOGGER.debug("Response=%s", response)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])
        current_time = datetime.datetime.utcnow()

        # wait till the recording start time
        if current_time < start_time:
            utils.wait(start_time - current_time, constants.TIME_DELTA)
        is_valid, error = validate_recordings.validate_notification(web_service_obj, constants.RecordingStatus.STARTED)

        assert is_valid, error

        time.sleep(constants.SECONDS * 60)
        recording.get_entry(0).EndTime = utils.get_formatted_time(constants.SECONDS * 0, TimeFormat.TIME_FORMAT_MS, stream)
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        time.sleep(constants.SECONDS * 4)
        is_valid, error = validate_recordings.validate_recording_end_state(recording_id, [RecordingStatus.COMPLETE],
                                                                           web_service_obj=web_service_obj)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #11
def test_sanity_hybrid_copy(stream, name, copy_type):
    """
    Schedule UNIQUE and COMMON copies on the same channel and verify recording and playback.
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 80,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream,
                                              copyType=copy_type)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE,
                                         Archive.COMPLETE)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED)
        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #12
def test_sanity_rtc10065_private_copy(stream):
    """
    Schedule Private copy and verify the recording and playback.
    """
    recording = None
    web_service_obj = None
    stream = str(private_copy_stream)
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 80,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
def test_tc_rec_038_013_active_asset(stream, name, total_recording):
    """
    Bulk deletion of assets in Active storage (20 requests)
    """
    recording = None
    web_service_objects = []
    recording_pool = None
    recording_id_list = []

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(total_recordings=total_recording,
                                              StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_id_list.append(recording_id)
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error
        recording_pool = mp_pool.ThreadPool(processes=total_recording)

        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objects[i]),
                callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_recording_deletion(
                recording_id_list[i])
            assert is_valid, error

            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id_list[i])
            assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
                recording_id_list[i])

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #14
def test_er_008_009_recording_recovery_manifest_restart_(stream, name, copy_type):
    """
    UNIQUE and COMMON copy recording recovery after a manifest-agent restart
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 150, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)

        total_recording = same_start_time_recordings
        last_recording_id = recording.Entries[same_start_time_recordings - 1].RecordingId

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        #Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        #Verifying recording is started or not
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        is_valid, error = delete_vmr_pods(V2pc.MANIFEST_AGENT)
        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        end_time = utils.get_parsed_time(response[0][RecordingAttribute.END_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.COMPLETE, wait_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        #Verifying recording in storage
        for i in range(total_recording):
            response = rio.find_recording(recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)

            assert is_valid, error
    
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_er_006_007_incomplete_delete_playback_US62461(stream, name, copy_type):
    """
    Create multiple recordings, force them INCOMPLETE by restarting the segment recorder, delete all but one,
    and verify playback of the remaining recording.
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10
    service_name = V2pc.MANIFEST_AGENT
    namespace = Component.VMR

    try:
        #Taking backup of v2pc pod config info, editing the config and then restarting the services
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(service_name, namespace, "4")
        assert is_valid, error

        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 160, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)
        for i in range(diff_start_time_recordings, same_start_time_recordings+diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * 30) + i, TimeFormat.TIME_FORMAT_MS, stream)
            end_time = utils.get_formatted_time((constants.SECONDS * 160) + i, TimeFormat.TIME_FORMAT_MS, stream)
            rec_with_diff_time = recording_model.Recording(total_recordings=1, StartTime=start_time, EndTime=end_time,
                                                           copyType=copy_type, StreamId=stream)
            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                                                        rec_with_diff_time.RequestId + '_' + str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        #Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error
      
        #restarting segment recorder to make INCOMPLETE recording
        is_valid, error = delete_vmr_pods(V2pc.SEGMENT_RECORDER)
        assert is_valid, error

        #Verifying recording INCOMPLETE STATE
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_pool.apply_async(validate_recordings.validate_recording_end_state,
                                       (recording_id, [constants.RecordingStatus.INCOMPLETE]),
                                       dict(web_service_obj=web_service_objects[i], end_time=end_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        recording.Entries.pop(-1)

        # Delete all but one recording
        response = a8.delete_recording(recording)

        # wait for archival (garbage collection) to complete
        archive_helper.wait_for_archival(stream, recording.get_entry(-1).RecordingId, Archive.ARCHIVE, Archive.COMPLETE)

        # Verify playback of the remaining recording
        is_valid, error = validate_recordings.validate_playback_using_vle(rec_with_diff_time.Entries[0].RecordingId)
        assert is_valid, error

    finally:
        # Revert the v2pc config changes
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(rec_with_diff_time)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #16
def test_rtc9782_tc_arc_012_playback_after_archival_delete_during_playback(
        stream):
    """
    Create a recording with unique copy, wait until archiving completes, playback the recording and
    delete the recording during playback
    """
    recording = None
    web_service_obj = None

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)

        assert is_valid, error

        vmr_plybk_url = utils.get_vmr_playback_url(recording_id=recording_id)
        print "[INFO: ] vmr playback url ", vmr_plybk_url
        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE,
                                         Archive.COMPLETE)
        response = rio.find_recording(recording_id).json()
        print "[INFO: ] recording details ", response
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)

        assert is_valid, error

        queue = Queue.Queue()

        playback_pool = mp_pool.ThreadPool(processes=1)
        playback_pool.apply_async(
            validate_recordings.validate_playback_using_vle, (recording_id, ),
            callback=queue.put)

        #time.sleep(10 * constants.SECONDS)

        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.RECON_STORAGE, Cos.RECORDING_STORED)

        assert is_valid, ValidationError.SEGMENT_NOT_MOVED.format(
            Cos.RECON_STORAGE, response)

        del_resp = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            del_resp, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id)
        assert is_valid, error

        is_valid, error = queue.get()
        assert is_valid, error

        if is_valid:
            # verifying whether the recording available in VMR
            vmr_plybk_url = utils.get_vmr_playback_url(
                recording_id=recording_id)
            resp = requests.get(vmr_plybk_url)
            assert not (
                resp.status_code == requests.codes.ok
            ), ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
                resp.status_code, resp.reason, resp.url)

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #17
def test_rtc9726_tc_er_018_private_copy(stream, name, copy_type):
    """
    Schedule 20 UNIQUE copy recordings (10 with the same start/end time, 10 with different start/end times),
    with a manifest-agent batch size of 4.
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10

    try:
        #Taking backup of v2pc pod config info, editing the config and then restarting the services
        is_valid, error = cleanup(redeploy_config_map, V2pc.MANIFEST_AGENT, revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(V2pc.MANIFEST_AGENT, Component.VMR, "4")
        assert is_valid, error

        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 80, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)

        for i in range(diff_start_time_recordings, same_start_time_recordings+diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * 30)+i, TimeFormat.TIME_FORMAT_MS, stream)
            end_time = utils.get_formatted_time((constants.SECONDS * 80)+i, TimeFormat.TIME_FORMAT_MS, stream)
            rec_with_diff_time = recording_model.Recording(total_recordings=1, StartTime=start_time, EndTime=end_time,
                                                              copyType=copy_type, StreamId=stream)
            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                                                        rec_with_diff_time.RequestId + '_' + str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        #Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error


        response = rio.find_recording(last_recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        #Verifying recording is started or not
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error


        #Verifying playback of recording in progress
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_vle,
                                       (recording.get_entry(i).RecordingId,), dict(in_progress = True),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_hls_checker,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        end_time = utils.get_parsed_time(response[0][RecordingAttribute.END_TIME][:-1])
        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        #Verifying recording is completed or not
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.COMPLETE, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        #Playback verification after recording completed
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_vle,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_hls_checker,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

    finally:
        # Revert the v2pc config changes
        is_valid, error = cleanup(redeploy_config_map, V2pc.MANIFEST_AGENT, revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #18
def test_tc_arc_(name, stream, event):
    """
    test_tc_arc_[009_multiple_playback_one_unique-ARCHIVE] --> Create multiple recordings with copy type as
    UNIQUE, wait for them to get archived, play back one of them and verify the recording is present in both
    ARCHIVE_STORAGE and RECON_STORAGE.
    test_tc_arc_[011_multiple_playback_one_re_archive_unique-RE_ARCHIVE] --> Create multiple recordings with
    copy type as UNIQUE, wait for them to get archived, play back one, and wait till it gets re-archived.
    """
    LOGGER.info("#####################################################################")
    LOGGER.info("Starting test_tc_arc_%s", name)

    total_recordings = 3
    recording = None
    web_service_objs = []
    recording_pool = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recordings, StartTime=start_time, EndTime=end_time,
                                              StreamId=stream)
        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        recording_id = recording.get_entry(0).RecordingId

        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_recording,
                                       (recording.get_entry(i).RecordingId, web_service_objs[i]), callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.COMPLETE)

        for i in range(total_recordings):
            response = rio.find_recording(recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                             Cos.RECORDING_NOT_STORED, i)

            assert is_valid, error

            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)

            assert is_valid, error

            is_valid, error = validate_storage.validate_copy_count(response, Cos.ARCHIVE_STORAGE)

            assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id)

        assert is_valid, error
        response = rio.find_recording(recording.get_entry(0).RecordingId).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                         Cos.RECORDING_STORED)

        assert is_valid, error
        if event == Archive.RE_ARCHIVE:
            archive_helper.wait_for_archival(stream, recording_id, Archive.RE_ARCHIVE, Archive.COMPLETE)
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.RECON_STORAGE,
                                                                             Cos.RECORDING_NOT_STORED)

            assert is_valid, error
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)

            assert is_valid, error

        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_playback,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()

        response = a8.delete_recording(recording)
        LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #19
def test_tc_rec(name, stream, copy_type):
    """
    Update the end time of the shorter recording after it has completed, while the longer recording
    is still in progress.
    """

    total_recordings = 2
    validate_rec_wait_time = 240
    recording = None
    web_service_objs = []
    recording_pool = None
    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(
            total_recordings=total_recordings,
            StartTime=start_time,
            EndTime=end_time,
            copyType=copy_type,
            StreamId=stream)
        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()
            LOGGER.debug("Recording=%s, UpdateURL=%s", recording_id,
                         web_service_objs[i].get_url())

        recording.get_entry(1).EndTime = utils.get_formatted_time(
            constants.SECONDS * 80, TimeFormat.TIME_FORMAT_MS, stream)
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        # Validate the longer recording in parallel
        recording_id_1 = recording.get_entry(1).RecordingId

        # recording_pool = mp_pool.ThreadPool(processes=2)
        recording_pool = mp_pool.ThreadPool()
        recording_pool.apply_async(validate_recordings.validate_recording,
                                   (recording_id_1, web_service_objs[1]),
                                   callback=queue.put)

        # Validating shorter recording to complete
        recording_id_0 = recording.get_entry(0).RecordingId
        is_valid, error = validate_recordings.validate_recording(
            recording_id_0, web_service_objs[0])

        assert is_valid, error

        shorter_recording_res = rio.find_recording(recording_id_0).json()
        # Validate the copy count for unique/common copy
        if RecordingAttribute.COPY_TYPE_UNIQUE == copy_type:
            is_valid, error = validate_storage.validate_copy_count(
                shorter_recording_res, Cos.ACTIVE_STORAGE, 2)
        elif RecordingAttribute.COPY_TYPE_COMMON == copy_type:
            is_valid, error = validate_storage.validate_copy_count(
                shorter_recording_res, Cos.ACTIVE_STORAGE, 1)

        assert is_valid, error
        LOGGER.debug("%s copy validation success", copy_type)

        # Try to update the end time after shorter recording completes
        response = rio.find_recording(recording_id_0).json()
        print "[INFO: ] response json ", response

        updated_end_time = utils.get_formatted_time(constants.SECONDS * 30,
                                                    TimeFormat.TIME_FORMAT_MS,
                                                    stream)
        recording.get_entry(0).EndTime = updated_end_time
        shorter_recording_res = a8.create_recording(recording, recording_id_0)
        response = rio.find_recording(recording_id_0).json()
        print "[INFO: ] response json ", response

        is_valid, error = validate_common.validate_http_response_status_code(
            shorter_recording_res, requests.codes.bad_request)
        print "[INFO: ] update end time after recording complete message ", error
        # assert is_valid, "CSCvc21524: - "+error

        # Validate the end time in the response; it should not match the rejected update
        response = rio.find_recording(recording_id_0).json()
        is_valid, error = validate_recordings.validate_time(
            response, updated_end_time, RecordingAttribute.END_TIME)

        assert not is_valid, error
        print "[INFO: ] Validating the updated end time in response, it should not be equal "
        print "[INFO: ] async validate recording wait time ", time.sleep(
            validate_rec_wait_time)
        print "[INFO: ] queue list empty ", queue.empty()

        # Validating the longer recording
        is_valid, error = queue.get(timeout=7)
        print "[INFO: ] queue value 1 ", is_valid
        assert is_valid, error
        print "[INFO: ] Validating the longer recording "

        # Playback all recordings
        for i in range(total_recordings):
            recording_pool.apply_async(validate_recordings.validate_playback,
                                       (recording.get_entry(i).RecordingId, ),
                                       callback=queue.put)
        print "[INFO: ] Playback all recordings "

        # Validate playback recordings
        for i in range(total_recordings):
            is_valid, error = queue.get()
            print "[INFO: ] queue value 2 ", is_valid
            assert is_valid, error
        print "[INFO: ] Validate playback recordings"

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()
        response = a8.delete_recording(recording)
        LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9769_tc_rec_037_(total_recording, stream, name, copy_type):
    """
    Update the end time of a recording after it starts
    """
    recording = None
    web_service_objects = []
    recording_pool = None
    recording_id_list = []

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 70,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recording,
                                              StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream,
                                              copyType=copy_type)

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_id_list.append(recording.get_entry(i).RecordingId)
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
            LOGGER.debug("Recording instance created=%s",
                         recording.serialize())

        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        # wait till the STARTED notification
        response = rio.find_recording(recording_id).json()
        LOGGER.debug("Response=%s", response)
        start_time = utils.get_parsed_time(
            response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        if current_time < start_time:
            utils.wait(start_time - current_time, constants.TIME_DELTA)
        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_notification(
                web_service_objects[i], constants.RecordingStatus.STARTED)
            assert is_valid, error

        # Updating the end time
        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            LOGGER.debug("Response=%s", response)
            end_time = response[0][RecordingAttribute.END_TIME][:-1]
            LOGGER.debug("Scheduled end time of recording=%s is %s",
                         recording_id_list[i], end_time)
            recording.get_entry(i).EndTime = utils.get_formatted_time(
                constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
            new_end_time = recording.get_entry(i).EndTime[:-1]
            LOGGER.debug("Updated end time of recording=%s to %s",
                         recording_id_list[i],
                         recording.get_entry(i).EndTime)

        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        recording_pool = mp_pool.ThreadPool(processes=total_recording)
        end_time = utils.get_parsed_time(new_end_time)
        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time),
                                           constants.SECONDS)

        if wait_time < 0:
            wait_time = 0

        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.COMPLETE,
                 wait_time),
                callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)

            assert is_valid, error

            if copy_type == RecordingAttribute.COPY_TYPE_COMMON:
                is_valid, error = validate_storage.validate_recording_in_storage(
                    response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED, 1)

                assert is_valid, error

        if copy_type == RecordingAttribute.COPY_TYPE_UNIQUE:
            archive_helper.wait_for_archival(
                stream,
                recording.get_entry(0).RecordingId, Archive.ARCHIVE,
                Archive.COMPLETE)
            for i in range(total_recording):
                response = rio.find_recording(
                    recording.get_entry(i).RecordingId).json()
                is_valid, error = validate_storage.validate_recording_in_storage(
                    response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
                assert is_valid, error
                is_valid, error = validate_storage.validate_copy_count(
                    response, Cos.ARCHIVE_STORAGE)

                assert is_valid, error

        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback,
                                       (recording.get_entry(i).RecordingId, ),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            is_valid, error = validate_recordings.validate_actual_time(
                response,
                recording.get_entry(i).EndTime,
                RecordingAttribute.ACTUAL_END_TIME)
            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            web_service_obj.stop_server()
        response = a8.delete_recording(recording)
        LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #21
def test_er_003_004_recording_incomplete(stream, name, copy_type):
    """
    Create multiple recordings (ten with the same start time and ten with staggered start times), restart the
    segment recorder while they are in progress to force an INCOMPLETE end state, and validate playback
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10
    recording_duration = 30  # in sec
    response_a8 = None
    service_name = V2pc.MANIFEST_AGENT
    namespace = Component.VMR

    try:
        
        # backup v2pc master config
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(service_name, namespace, "4")
        assert is_valid, error
        
    
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * recording_duration, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * recording_duration * 5, TimeFormat.TIME_FORMAT_MS,
                                            stream)

        # same start time 10 recordings
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)

        # different start time 10 recordings
        for i in range(diff_start_time_recordings, same_start_time_recordings + diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * recording_duration)+i,
                                                  TimeFormat.TIME_FORMAT_MS, stream)
            end_time = utils.get_formatted_time((constants.SECONDS * recording_duration * 5)+i,
                                                TimeFormat.TIME_FORMAT_MS, stream)
            rec_with_diff_time = recording_model.Recording(total_recordings=diff_start_time_recordings,
                                                           StartTime=start_time, EndTime=end_time,
                                                           copyType=copy_type, StreamId=stream
                )
            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                rec_with_diff_time.RequestId + '_'+str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        # get recording id and update url
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        # create recording
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response_a8 = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response_a8, requests.codes.no_content)

        assert is_valid, error
        
        response = rio.find_recording(last_recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0    

        # validate recording is started
        recording_pool = mp_pool.ThreadPool()  
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restart the segment recorder to force the recordings into an INCOMPLETE state
        is_valid, error = delete_vmr_pods(V2pc.SEGMENT_RECORDER)
        assert is_valid, error

        # validate playback using vle
        # verify recording in progress
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_vle,
                                       (recording.get_entry(i).RecordingId,), dict(in_progress=True),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # validate playback using hls checker while recording is in progress
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_hls_checker,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            if not is_valid:
                assert 'discontinuity' in error, error
        
        # Verify that the recordings end in the INCOMPLETE state
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_pool.apply_async(validate_recordings.validate_recording_end_state,
                                       (recording_id, [constants.RecordingStatus.INCOMPLETE]),
                                       dict(web_service_obj=web_service_objects[i], end_time=end_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Playback using VLE
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_vle,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Playback using hls checker
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_hls_checker,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            if not is_valid:
                assert 'discontinuity' in error, error

    finally:
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()
        
        if response_a8:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #22
def test_rtc9788_bulk_update_end_time_during_recording_inprogress_unique(
        total_recording, stream, name, copy_type):
    """
    TC9788 : Bulk update of the end time for 20 recording requests (unique copy) while the recordings are in progress
    """
    recording = None
    web_service_objects = []
    recording_pool = None
    recording_id_list = []

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 180,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recording,
                                              StartTime=start_time,
                                              EndTime=end_time,
                                              StreamId=stream,
                                              copyType=copy_type)

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_id_list.append(recording_id)
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)

        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error
        recording_pool = mp_pool.ThreadPool(processes=total_recording)

        start_wait = constants.RECORDING_DELAY + 30
        sleep_time = utils.get_sleep_time(start_wait, stream)
        print "[INFO: ] Waiting %d seconds for recording to start" % sleep_time
        LOGGER.debug("Waiting %d seconds for recording to start", sleep_time)
        time.sleep(sleep_time)

        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            LOGGER.debug("Response=%s", response)
            end_time = response[0][RecordingAttribute.END_TIME][:-1]

            LOGGER.debug("Scheduled end time of recording=%s is %s",
                         recording_id_list[i], end_time)
            recording.get_entry(i).EndTime = utils.get_formatted_time(
                constants.SECONDS * 70, TimeFormat.TIME_FORMAT_MS, stream)

            LOGGER.debug("Updated end time of recording=%s to %s",
                         recording_id_list[i],
                         recording.get_entry(i).EndTime)

        # Update the recording with the updated end time
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        # Validating whether the updated end time was populated or not
        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            LOGGER.debug("Response=%s", response)
            is_valid, error = validate_recordings.validate_time(
                response,
                recording.get_entry(i).EndTime, RecordingAttribute.END_TIME)
            assert is_valid, error

        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objects[i]),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        if len(web_service_objects):
            for web_service_obj in web_service_objects:
                web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
def test_rtc9799_tc_ply_008_playback_pause_resume(stream):
    """
    Create a recording, pause and resume during playback and verify if playback was successful
    """
    recording = None
    web_service_obj = None
    pause_trigger = 2
    pause_duration = 5

    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)

        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)

        assert is_valid, error

        is_valid, error = validate_storage.validate_recording_in_storage(
            response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED, 1)

        assert is_valid, error

        vle_request_params = {
            Vle.TRICKMODES:
            '{0},{1},{2},{3}'.format(Vle.TRICKMODE_PAUSE, pause_trigger,
                                     pause_duration, Vle.PAUSE_WRT_SEGMENT)
        }
        is_valid, error = validate_recordings.validate_playback(
            recording_id,
            VLE_REQUEST_PARAMS=vle_request_params,
            VALIDATOR_TYPE=vle_validators_configuration.
            PLAYBACK_VALIDATION_TRICKMODE)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_update_recording_ipdvrtests_48(common_lib, stream):
    """
    JIRA_URL    : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-48
    DESCRIPTION : "Update Recording"
    """
    recording = None
    web_service_obj = None
    try:
        # step 1: Create a recording and update the end time; alternatively, update the start time if the original start time is in the future
        ##creating a recording
        recording, web_service_obj = common_lib.create_recording(stream)

        ##change the start time of the recording
        start_time_new = utils.get_formatted_time(constants.SECONDS * 80,
                                                  TimeFormat.TIME_FORMAT_MS,
                                                  stream)
        recording.get_entry(0).StartTime = start_time_new
        ##change the end time of the recording
        end_time_new = utils.get_formatted_time(constants.SECONDS * 110,
                                                TimeFormat.TIME_FORMAT_MS,
                                                stream)
        recording.get_entry(0).EndTime = end_time_new
        response = a8.create_recording(recording)
        recording_id = recording.get_entry(0).RecordingId
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        ##verify the recording complete
        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

        #step 2:Verify memsql table is updated with new time using RIO API
        response = rio.find_recording(recording_id)
        resp = json.loads(response.content)
        is_valid, error = validate_recordings.validate_time(
            resp, start_time_new, RecordingAttribute.START_TIME)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_time(
            resp, end_time_new, RecordingAttribute.END_TIME)
        assert is_valid, error

        #step 3:Check cos logs to see segments are written using COS API
        is_valid, error = validate_storage.validate_recording_in_storage(
            resp, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
        assert is_valid, error

        #step 4:Check MA/SR pod logs for any errors while recording
        s_time = utils.get_parsed_time(str(start_time_new)[:-1])
        e_time = utils.get_parsed_time(str(end_time_new)[:-1])
        is_valid, error = vmr_helper.verify_error_logs_in_vmr(
            stream,
            'manifest-agent',
            'vmr',
            search_string="ERROR",
            start_time=s_time,
            end_time=e_time)
        assert is_valid, error
        is_valid, error = vmr_helper.verify_error_logs_in_vmr(
            stream,
            'segment-recorder',
            'vmr',
            search_string="ERROR",
            start_time=s_time,
            end_time=e_time)
        assert is_valid, error

        #step 5:Check no discontinuity errors in MA
        is_valid, error = vmr_helper.verify_error_logs_in_vmr(
            stream,
            'manifest-agent',
            'vmr',
            search_string="discontinuity",
            start_time=s_time,
            end_time=e_time)
        assert is_valid, error

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
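
Steps 4 and 5 above scan component logs for a search string inside the recording window. The real check lives in vmr_helper.verify_error_logs_in_vmr; the sketch below only shows the general shape of such a time-windowed scan, under the assumption that each log line starts with a microsecond-precision ISO timestamp.

import datetime

LOG_TS_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"    # assumed per-line timestamp prefix


def find_matches(lines, search_string, start, end):
    hits = []
    for line in lines:
        try:
            timestamp = datetime.datetime.strptime(line[:26], LOG_TS_FORMAT)
        except ValueError:
            continue                        # skip lines without a parsable timestamp
        if start <= timestamp <= end and search_string in line:
            hits.append(line)
    return hits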
Example #25
def test_rtc9813_tc_des_006_dcm_multi_cast_ip_packet_loss_recording():
    """
    Introduce 30% packet loss on the incoming MCE network interface throughout the recording lifetime, then validate
    that the recording ends in the INCOMPLETE or COMPLETE state and check the number of available segments recorded.
    """
    ssh_client = None
    response = None
    web_service_obj = None
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_in = mce_node[Interface.DATA_IN]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)

            if mce_node[Interface.DATA_IN] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip,
                                                                      destructive.MCE_JOB_IDS, constants.MINUTES * 10)
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_in, mce_ip, destructive.MCE_JOB_IDS, constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

            des_cmd = DestructiveTesting.PACKET_LOSS_INCOMING_INTERFACE.format(DestructiveTesting.IFB_INTERFACE,
                                                                               DestructiveTesting.PACKET_LOSS)
            des_cmd = destructive_utils.get_incoming_tc_cmd(mce_data_in, des_cmd)

            # expected outcome after the destructive commands are run
            expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS,
                               DestructiveTesting.SRC: DestructiveTesting.NETWORK}
            is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, DestructiveTesting.IFB_INTERFACE,
                                                                     des_cmd, expected_result)
            assert is_des_effective, error

        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        recording_id = response[RecordingAttribute.RECORDING_ID]
        recording_response = rio.find_recording(recording_id).json()
        LOGGER.debug("Recording response=%s", recording_response)

        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        is_valid, desc = validate_recordings.validate_recording_end_state(
            recording_id, [RecordingStatus.INCOMPLETE, RecordingStatus.COMPLETE], web_service_obj=web_service_obj,
            end_time=end_time)

        assert is_valid, desc
        if is_valid and desc == RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id, VALIDATOR_TYPE=vle_validators_configuration.PLAYBACK_VALIDATION_COMPLETE)
            assert is_valid, error

        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip,
                                                                      DestructiveTesting.IFB_INTERFACE, rev_cmds[mce_ip],
                                                                      destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(ssh_client, DestructiveTesting.IFB_INTERFACE)
            assert rev_effective, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #26
def test_tc_arc(name, stream, archive_playback):
    """
    Schedule multiple unique-copy recordings with the same start and end time, play back one recording and delete it,
    then play back the remaining recordings.
    """
    total_recordings = 3
    recording = None
    web_service_objs = []
    recording_pool = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        recording = recording_model.Recording(
            total_recordings=total_recordings,
            StartTime=start_time,
            EndTime=end_time,
            copyType=copy_type,
            StreamId=stream)
        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()
            LOGGER.debug("Recording=%s, UpdateURL=%s", recording_id,
                         web_service_objs[i].get_url())

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objs[i]),
                callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error
        # Validating the copy count of unique copy recording in active storage
        for i in range(total_recordings):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ACTIVE_STORAGE, total_recordings)
            assert is_valid, error

        # Wait till archival completes
        recording_id_0 = recording.get_entry(0).RecordingId
        archive_helper.wait_for_archival(stream, recording_id_0,
                                         Archive.ARCHIVE, Archive.COMPLETE)
        # Validating copy count in archive storage after archival duration
        for i in range(total_recordings):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED)

            assert is_valid, error

            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)

            assert is_valid, error

            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ARCHIVE_STORAGE, 1)
            assert is_valid, error

        if archive_playback:
            response = rio.find_recording(recording_id_0).json()
            # Validating the first recording for playback
            is_valid, error = validate_recordings.validate_playback_using_vle(
                recording_id_0)
            assert is_valid, error

            # Validating the copy of segments in the archive folder after playback
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

            # Validating the segments in recon folder after playback
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.RECON_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

        response = a8.delete_recording(recording,
                                       recording.get_entry(0).RecordingId)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id_0)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id_0)

        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            recording_id_0)

        del recording.get_entries()[:1]
        # Check remaining records still in archive
        if archive_playback:
            for recording_entry in recording.get_entries():
                response = rio.find_recording(
                    recording_entry.RecordingId).json()
                recording_pool.apply_async(
                    validate_storage.validate_recording_in_storage,
                    (response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED),
                    callback=queue.put)

            for i in range(len(recording.get_entries())):
                is_valid, error = queue.get()
                assert is_valid, error

        for recording_entry in recording.get_entries():
            recording_pool.apply_async(
                validate_recordings.validate_playback_using_vle,
                (recording_entry.RecordingId, ),
                callback=queue.put)

        for i in range(len(recording.get_entries())):
            is_valid, error = queue.get()
            assert is_valid, error
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()
        response = a8.delete_recording(recording)
        LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #27
def test_rtc9763_tc_rec_027_multiple_delete_playback_unique(stream):
    """
    Create multiple recordings with copy type as UNIQUE, delete one, and play back the rest
    """
    total_recordings = 3
    web_service_objs = []
    playback_pool = None
    recording = None

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(
            total_recordings=total_recordings,
            StartTime=start_time,
            EndTime=end_time,
            StreamId=stream)

        for i in range(total_recordings):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objs.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objs[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        recording_id_0 = recording.get_entry(0).RecordingId

        recording_pool = mp_pool.ThreadPool(processes=total_recordings)
        for i in range(total_recordings):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objs[i]),
                callback=queue.put)

        for i in range(total_recordings):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recordings):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ACTIVE_STORAGE, total_recordings)

            assert is_valid, error

        recording_to_delete = copy.deepcopy(recording)
        del recording_to_delete.get_entries()[1:]
        del recording.get_entries()[:1]  # to clean up recordings later
        response = a8.delete_recording(recording_to_delete)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording_deletion(
            recording_id_0)

        assert is_valid, error

        is_valid, error = validate_recordings.validate_playback_using_vle(
            recording_id_0)

        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(
            recording_id_0)

        playback_pool = mp_pool.ThreadPool(
            processes=len(recording.get_entries()))
        for recording_entry in recording.get_entries():
            playback_pool.apply_async(
                validate_recordings.validate_playback_using_vle,
                (recording_entry.RecordingId, ),
                callback=queue.put)

        for i in range(len(recording.get_entries())):
            is_valid, error = queue.get()
            assert is_valid, error
    finally:
        if playback_pool:
            playback_pool.close()
            playback_pool.join()
        for web_service_obj in web_service_objs:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
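
The delete-one / keep-the-rest bookkeeping above leans on Python's in-place slice deletion: the deep copy trimmed to its first entry is what gets sent to the delete API, while the original model keeps the remaining entries so the finally block can clean them up. A tiny sketch with plain lists as stand-ins for recording entries:

import copy

entries = ['rec_0', 'rec_1', 'rec_2']       # stand-ins for recording entries
to_delete = copy.deepcopy(entries)
del to_delete[1:]                           # -> ['rec_0'], the entry sent for deletion
del entries[:1]                             # -> ['rec_1', 'rec_2'], kept for later cleanup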
Example #28
def test_rtc9801_tc_des_008_mce_ni_block_pending_recording():
    """
    Block traffic on the outgoing MCE interface, trigger a recording (4 minutes), and unblock the interface after 2 minutes.
    Check that the recording ends up INCOMPLETE and verify playback of the recording.
    """
    ssh_client = None
    response = None
    start_duration = 30
    end_duration = 270
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)

        for mce_node in mce_nodes:
            mce_data_out = mce_node[Interface.DATA_OUT]
            mce_ip = mce_node[Component.IP]

            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)

            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS)

            des_cmd = DestructiveTesting.PACKET_LOSS_OUTGOING_INTERFACE.format(
                mce_data_out, DestructiveTesting.PACKET_LOSS_BLOCK)
            des_cmd = destructive_utils.get_outgoing_tc_cmd(
                mce_data_out, des_cmd)

            if mce_node[Interface.DATA_OUT] != mce_node[Interface.MGMT]:
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(
                    ssh_client, mce_data_out, mce_ip, destructive.MCE_JOB_IDS,
                    constants.MINUTES * 10)

                expected_result = {
                    DestructiveTesting.LOSS:
                    DestructiveTesting.PACKET_LOSS_BLOCK,
                    DestructiveTesting.DST: DestructiveTesting.NETWORK
                }
                is_des_effective, error = destructive_utils.exec_des_cmd(
                    ssh_client, mce_data_out, des_cmd, expected_result)
                assert is_des_effective, error
            else:
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_out,
                                                   mce_ip,
                                                   destructive.MCE_JOB_IDS,
                                                   constants.MINUTES * 2)
                rev_cmds[mce_ip] = None

                LOGGER.info(
                    "Executing the command=%s to cause destruction in the component",
                    des_cmd)
                ssh_client.exec_command(des_cmd)

        start_time = utils.get_formatted_time(
            constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS,
            STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * end_duration,
                                            TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)

        time.sleep(end_duration + constants.TIME_DELTA)

        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            mce_data_out = mce_node[Interface.DATA_OUT]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME,
                                              COMPONENT_USERNAME,
                                              component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(
                    COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip],
                    mce_data_out, destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(
                    ssh_client, mce_data_out)
            assert rev_effective, error

        is_valid, rec_error = validate_recordings.validate_recording_end_state(
            response[RecordingAttribute.RECORDING_ID],
            [RecordingStatus.INCOMPLETE],
            web_service_obj=response[RecordingAttribute.WEB_SERVICE_OBJECT])

        recording_response = rio.find_recording(
            response[RecordingAttribute.RECORDING_ID]).json()
        LOGGER.debug("Recording response=%s", recording_response)

        assert is_valid, rec_error

        # validate playback to check if available segments are recorded
        is_valid, error = validate_recordings.validate_playback(
            response[RecordingAttribute.RECORDING_ID])

        assert is_valid, error

        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if response:
            response[RecordingAttribute.WEB_SERVICE_OBJECT].stop_server()
            response = a8.delete_recording(
                response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
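
Both destructive tests schedule a revert before running the blocking command, so the node recovers on its own even if cutting the data interface kills the SSH session. The exact mechanism is inside destructive_utils.schedule_rev_cmd; one plausible shape for it, using netem to block traffic and at to queue the undo, is sketched below with placeholder interface names and delay.

# Block all outgoing traffic on the data interface (placeholder name).
BLOCK_OUTGOING = "tc qdisc add dev {data_out} root netem loss 100%"

# Queue the revert first, so it runs even if the session above is lost.
REVERT = "tc qdisc del dev {data_out} root"
SCHEDULE_REVERT = "echo '" + REVERT + "' | at now + 10 minutes"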
def test_tc_rec_033_015_(total_recording, stream, name, copy_type):
    """
    Create multiple recordings with the given copy type and validate storage, archival (for UNIQUE copies), and playback
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recording,
                                              StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        recording_pool = mp_pool.ThreadPool(processes=total_recording)
        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objects[i]),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)

            assert is_valid, error

            if copy_type == RecordingAttribute.COPY_TYPE_COMMON:
                is_valid, error = validate_storage.validate_recording_in_storage(
                    response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED, 1)

                assert is_valid, error

        if copy_type == RecordingAttribute.COPY_TYPE_UNIQUE:
            archive_helper.wait_for_archival(
                stream,
                recording.get_entry(0).RecordingId, Archive.ARCHIVE,
                Archive.COMPLETE)
            for i in range(total_recording):
                response = rio.find_recording(
                    recording.get_entry(i).RecordingId).json()
                is_valid, error = validate_storage.validate_recording_in_storage(
                    response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
                assert is_valid, error
                is_valid, error = validate_storage.validate_copy_count(
                    response, Cos.ARCHIVE_STORAGE)

                assert is_valid, error

        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback,
                                       (recording.get_entry(i).RecordingId, ),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            web_service_obj.stop_server()

        response = a8.delete_recording(recording)
        LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #30
def test_rtc9735_er_011_hybrid_recording_recovery_manifest_restart(stream):
    """
    Hybrid (UNIQUE and COMMON) copy recording recovery across a manifest agent restart
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 10
    common_copy_recordings = 5
    unique_copy_recordings = 5

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 120,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(
            total_recordings=common_copy_recordings,
            StartTime=start_time,
            EndTime=end_time,
            copyType=RecordingAttribute.COPY_TYPE_COMMON,
            StreamId=stream)
        for i in range(unique_copy_recordings, total_recording):
            rec_uniq = recording_model.Recording(
                total_recordings=unique_copy_recordings,
                StartTime=start_time,
                EndTime=end_time,
                copyType=RecordingAttribute.COPY_TYPE_UNIQUE,
                StreamId=stream)
            rec_uniq.Entries[0].RecordingId = (
                RecordingAttribute.RECORDING_ID_PREFIX + rec_uniq.RequestId +
                '_' + str(i))
            recording.Entries.append(rec_uniq.get_entry(0))
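        # Keep the id of the last appended entry; its RIO record is used below
        # to derive the scheduled start and end times.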
        last_recording_id = rec_uniq.Entries[0].RecordingId

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        # Sending the request to create all recordings
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        # A test should fail, not return, when the recording is missing
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(
            last_recording_id)
        start_time = utils.get_parsed_time(
            response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time),
                                           constants.SECONDS)
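        # If the scheduled start has already passed, do not wait at all.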
        if wait_time < 0:
            wait_time = 0

        # Verifying that each recording has started
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.STARTED,
                 wait_time),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restarting the manifest agent by deleting its pods
        is_valid, error = delete_vmr_pods(V2pc.MANIFEST_AGENT)
        assert is_valid, error
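        # The recordings are still in progress; the checks below verify they
        # still reach COMPLETE after the manifest agent pods are recreated.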

        end_time = utils.get_parsed_time(
            response[0][RecordingAttribute.END_TIME][:-1])
        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time),
                                           constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.COMPLETE,
                 wait_time),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Verifying each recording is present in active storage
        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

        # Verifying archive storage
        archive_helper.wait_for_archival(stream,
                                         recording.get_entry(0).RecordingId,
                                         Archive.ARCHIVE, Archive.COMPLETE)
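        # Every recording, COMMON and UNIQUE alike, should now be present in
        # archive storage with the expected copy count.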
        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error
            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ARCHIVE_STORAGE)

            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)