def test_tc_er_006_007_incomplete_delete_playback_US62461(stream, name, copy_type):
    """
    Create multiple recordings with copy type as COMMON
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10
    service_name = V2pc.MANIFEST_AGENT
    namespace = Component.VMR

    try:
        # Back up the v2pc pod config, edit it, and restart the affected services
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(service_name, namespace, "4")
        assert is_valid, error

        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 160, TimeFormat.TIME_FORMAT_MS, stream)
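        # First batch: recordings that share the same start and end time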
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)
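        # Second batch: recordings with start/end times offset by i each, so every one has a distinct start time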
        for i in range(diff_start_time_recordings, same_start_time_recordings+diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * 30) + i, TimeFormat.TIME_FORMAT_MS, stream)
            end_time = utils.get_formatted_time((constants.SECONDS * 160) + i, TimeFormat.TIME_FORMAT_MS, stream)
            rec_with_diff_time = recording_model.Recording(total_recordings=1, StartTime=start_time, EndTime=end_time,
                                                           copyType=copy_type, StreamId=stream)
            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                                                        rec_with_diff_time.RequestId + '_' + str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
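        # The last staggered recording's ID is used below to look up its scheduled start time in RIO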
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        #Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
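        # Parse the scheduled start time returned by RIO (trailing 'Z' dropped) to compute the wait below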
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        recording_pool = mp_pool.ThreadPool()
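        # Fire off STARTED-notification validations in parallel; each result lands on the queue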
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
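        # Drain the queue and fail fast on the first validation error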
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error
      
        # Restart the segment recorder to force the recordings into INCOMPLETE state
        is_valid, error = delete_vmr_pods(V2pc.SEGMENT_RECORDER)
        assert is_valid, error

        # Verify that each recording ends in INCOMPLETE state
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_pool.apply_async(validate_recordings.validate_recording_end_state,
                                       (recording_id, [constants.RecordingStatus.INCOMPLETE]),
                                       dict(web_service_obj=web_service_objects[i], end_time=end_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Remove the last staggered entry so it survives the bulk delete below
        recording.Entries.pop(-1)

        # Delete all recordings except the one just removed from the entry list
        response = a8.delete_recording(recording)

        # Wait for garbage collection / archival to complete
        archive_helper.wait_for_archival(stream, recording.get_entry(-1).RecordingId, Archive.ARCHIVE, Archive.COMPLETE)

        # Verify playback of the recording that was not deleted
        is_valid, error = validate_recordings.validate_playback_using_vle(rec_with_diff_time.Entries[0].RecordingId)
        assert is_valid, error

    finally:
        # Revert the v2pc config changes
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            # Delete the one recording that was intentionally left behind
            response = a8.delete_recording(rec_with_diff_time)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_er_008_009_recording_recovery_manifest_restart_(stream, name, copy_type):
    """
    UNIQUE  and COMMON copy Recording recovery
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 150, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)

        # This variant only creates the same-start-time batch, so cap the total accordingly
        total_recording = same_start_time_recordings
        last_recording_id = recording.Entries[same_start_time_recordings - 1].RecordingId

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        #Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        # Verify that every recording has reached STARTED state
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restart the manifest agent to exercise recording recovery
        is_valid, error = delete_vmr_pods(V2pc.MANIFEST_AGENT)
        assert is_valid, error

        # Re-query RIO for the actual end time and wait for COMPLETE notifications
        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        end_time = utils.get_parsed_time(response[0][RecordingAttribute.END_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.COMPLETE, wait_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        #Verifying recording in storage
        for i in range(total_recording):
            response = rio.find_recording(recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)

            assert is_valid, error
    
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9735_er_011_hybrid_recording_recovery_manifest_restart(stream):
    """
    Hybrid (UNIQUE  and COMMON) copy Recording recovery
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 10
    common_copy_recordings = 5
    unique_copy_recordings = 5

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 120,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(
            total_recordings=common_copy_recordings,
            StartTime=start_time,
            EndTime=end_time,
            copyType=RecordingAttribute.COPY_TYPE_COMMON,
            StreamId=stream)
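        # Add unique-copy recordings on top of the common-copy batch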
        for i in range(unique_copy_recordings, total_recording):
            rec_uniq = recording_model.Recording(
                total_recordings=unique_copy_recordings,
                StartTime=start_time,
                EndTime=end_time,
                copyType=RecordingAttribute.COPY_TYPE_UNIQUE,
                StreamId=stream)
            rec_uniq.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                rec_uniq.RequestId + '_' + str(i)
            recording.Entries.append(rec_uniq.get_entry(0))
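        # The last unique-copy recording is used below to look up the scheduled start time in RIO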
        last_recording_id = rec_uniq.Entries[0].RecordingId

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        # Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(
                last_recording_id)
        start_time = utils.get_parsed_time(
            response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time),
                                           constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        # Verifying recording is started or not
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.STARTED,
                 wait_time),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restarting manifest agent
        is_valid, error = delete_vmr_pods(V2pc.MANIFEST_AGENT)
        assert is_valid, error

        # Compute how long to wait for COMPLETE notifications, using the end time
        # from the response fetched before the restart
        end_time = utils.get_parsed_time(
            response[0][RecordingAttribute.END_TIME][:-1])
        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time),
                                           constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.COMPLETE,
                 wait_time),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Verifying recording in storage
        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

        # Verifying archive storage and copy counts
        archive_helper.wait_for_archival(stream,
                                         recording.get_entry(0).RecordingId,
                                         Archive.ARCHIVE, Archive.COMPLETE)
        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error
            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ARCHIVE_STORAGE)

            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
def test_er_003_004_recording_incomplete(stream, name, copy_type):
    """
    Create multiple recordings with copy type as COMMON
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10
    recording_duration = 30  # in sec
    response_a8 = None
    service_name = V2pc.MANIFEST_AGENT
    namespace = Component.VMR

    try:
        
        # backup v2pc master config
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(service_name, namespace, "4")
        assert is_valid, error
        
    
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * recording_duration, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * recording_duration * 5, TimeFormat.TIME_FORMAT_MS,
                                            stream)

        # same start time 10 recordings
        recording = recording_model.Recording(total_recordings=same_start_time_recordings, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)

        # different start time 10 recordings
        for i in range(diff_start_time_recordings, same_start_time_recordings + diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * recording_duration)+i,
                                                  TimeFormat.TIME_FORMAT_MS, stream)
            end_time = utils.get_formatted_time((constants.SECONDS * recording_duration * 5)+i,
                                                TimeFormat.TIME_FORMAT_MS, stream)
            rec_with_diff_time = recording_model.Recording(total_recordings=diff_start_time_recordings,
                                                           StartTime=start_time, EndTime=end_time,
                                                           copyType=copy_type, StreamId=stream)
            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                rec_with_diff_time.RequestId + '_'+str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        # get recording id and update url
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        # create recording
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response_a8 = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response_a8, requests.codes.no_content)

        assert is_valid, error
        
        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(last_recording_id)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time), constants.SECONDS)
        if wait_time < 0:
            wait_time = 0    

        # validate recording is started
        recording_pool = mp_pool.ThreadPool()  
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_notification,
                                       (web_service_objects[i], constants.RecordingStatus.STARTED, wait_time),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restart the segment recorder to force the recordings into INCOMPLETE state
        is_valid, error = delete_vmr_pods(V2pc.SEGMENT_RECORDER)
        assert is_valid, error

        # Validate playback using VLE while the recordings are still in progress
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_vle,
                                       (recording.get_entry(i).RecordingId,), dict(in_progress=True),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # validate playback using hls checker while recording is in progress
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_hls_checker,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            if not is_valid:
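                # A discontinuity is expected here because the segment recorder restart breaks the stream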
                assert 'discontinuity' in error, error
        
        # Verify that each recording ends in INCOMPLETE state
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_pool.apply_async(validate_recordings.validate_recording_end_state,
                                       (recording_id, [constants.RecordingStatus.INCOMPLETE]),
                                       dict(web_service_obj=web_service_objects[i], end_time=end_time),
                                       callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Playback using VLE
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_vle,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Playback using hls checker
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_playback_using_hls_checker,
                                       (recording.get_entry(i).RecordingId,), callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            if not is_valid:
                assert 'discontinuity' in error, error

    finally:
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()
        
        if response_a8:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_HLS_Verify_codec_based_audio_filtering_STANDALONE_ipdvrtests_46(channel):
    stream = channel
    web_service_obj = None
    recording = None
    try:
        service_name = "playout-packager"
        is_valid, config_file = vmr.fetch_config_file(service_name, mpe_config[Component.NAMESPACE])
        assert is_valid, config_file
        v2pc_config_path = os.path.dirname(config_file)
        config_file_name = os.path.basename(config_file)
        update_path = os.path.join(v2pc_config_path, VMR_SERVICES_RESTART.UPDATE_PATH)
        updated_config = os.path.join(update_path, config_file_name)

        audio_codecs = utils.get_audio_codecs(stream, V2PC_EXIST)
        assert audio_codecs, "No manifest response from the given mpd url"
		
        LOGGER.info("Audio Codecs available in selected Stream are : %s", audio_codecs)
        assert len(set(audio_codecs.values())) >= 2, "there is only one audio codec format available in the selected stream"

        # Filtering out one codec from the available default codecs:
        filtered_codec = audio_codecs.items()[0][1]
        LOGGER.info("filter_codecs : %s", filtered_codec)

        Codec_disabled_payload = []
        for pid, codec in audio_codecs.items():
            if codec == filtered_codec:
                Codec_disabled_payload.append((pid, codec))

        LOGGER.info("Codec_disabled_payload : %s", Codec_disabled_payload)

        # Build the streamConfiguration payload that disables every PID using the filtered codec.
        # A separate loop variable is used so filtered_codec (the codec string) is not shadowed;
        # it is needed again when validating the playback manifest below.
        push_payload = []
        for pid, codec in Codec_disabled_payload:
            push_payload.append({
                        'action': 'disable',
                        'pid': str(pid),
                        'codec': str(codec).upper(),
                        'type': 'audio',
                        'default': 'false'
                        })
     
        # Load the packager config map and replace the HLS publish template's
        # streamConfiguration with the codec-disable payload built above
        with open(os.path.join(v2pc_config_path, config_file_name), "r") as fp:
            dt = json.load(fp)
            workflows = json.loads(dt['data']['workflows.conf'])
            template = workflows.keys()[0]
            publish_templates = workflows[template]['assetResolver']['workflow'][0]['publishTemplates']
            for templates in publish_templates:
                if templates['name'] == 'HLS':
                    if templates.has_key('streamConfiguration'):
                        LOGGER.info("Default Payload in HLS publish template : %s", templates['streamConfiguration'])
                        #Updating Payload
                        templates['streamConfiguration'] = []
                        templates['streamConfiguration'].extend(push_payload)
                        LOGGER.info("Updated Payload after codec filtering : %s", templates['streamConfiguration']) 
            workflows[template]['assetResolver']['workflow'][0]['publishTemplates'] = publish_templates
            dt['data']['workflows.conf'] = json.dumps(workflows)
        with open(updated_config, 'w') as f:
            json.dump(dt, f, indent=4)

        # Apply the config with oc apply -f command
        redeploy_res, resp = vmr.redeploy_config_map(service_name, mpe_config[Component.NAMESPACE])
        assert redeploy_res, resp
        delete_pods, resp = vmr.delete_vmr_pods("All", mpe_config[Component.NAMESPACE])
        assert delete_pods, resp

        # Create recording
        LOGGER.info("Creating Recording")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)

        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
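        # Build and log the HLS playback URL for reference (playback is validated further below)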
        playback_url = utils.get_mpe_playback_url(recording_id, "hls")
        LOGGER.info("playback_url : %s", playback_url)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        #Find recording
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio : %s", response[0]['Status'])

        #Playback and Validate Playback
        LOGGER.info("Playback Recording for the recording ID : %s", recording.get_entry(0).RecordingId)
        is_valid, error = validate_recordings.validate_playback(recording.get_entry(0).RecordingId)
        LOGGER.info("Validate recording : %s", is_valid)
        assert is_valid, error

        #Get Playback URL
        playback_url = utils.get_mpe_playback_url(recording_id)
        LOGGER.info("Playback_ulr ----- %s", playback_url)

        #Validate Filtered Codec Value in m3u8
        filtered_codecs_validation = []
        result = True
        codec_check = m3u8.load(playback_url)
        codec_check_data = codec_check.data
        LOGGER.info("m3u8 playback output :")
        LOGGER.info("codec_check_data : %s", codec_check_data)
        if (type(codec_check_data) == dict) and codec_check_data.has_key('media'):
            filtered_codecs_validation = [media_data['codecs'] for media_data in codec_check_data['media']
                                          if media_data['type'] == "AUDIO"]

        # The disabled codec must no longer appear among the AUDIO entries of the playback manifest
        if filtered_codec not in filtered_codecs_validation:
            message = "Codecs filtered successfully"
        else:
            message = "filtering not happened properly"
            result = False

        assert result, message
        LOGGER.info("Testcase passed with the message : %s",message)

    finally:
        LOGGER.info("Reverting default payload...")
        vmr.redeploy_config_map(service_name, mpe_config[Component.NAMESPACE], revert=True)

        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
                                                                                          
def test_rtc9729_er_005_recording_incomplete_us62460(stream):
    """
    Archiving of INCOMPELTE UNIQUE copy recordings and playback
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
    total_rec = 3
    recording_duration = 30  # in sec

    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(
            constants.SECONDS * recording_duration, TimeFormat.TIME_FORMAT_MS,
            stream)
        end_time = utils.get_formatted_time(
            constants.SECONDS * recording_duration * 5,
            TimeFormat.TIME_FORMAT_MS, stream)

        # recordings sharing the same start and end time
        recording = recording_model.Recording(total_recordings=total_rec,
                                              StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)

        last_recording_id = recording.Entries[total_rec - 1].RecordingId

        # get recording id and update url
        for i in range(total_rec):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        # create recording
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(
                last_recording_id)
        start_time = utils.get_parsed_time(
            response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time),
                                           constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        # validate recording is started
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_rec):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.STARTED,
                 wait_time),
                callback=queue.put)
        for i in range(total_rec):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restart the segment recorder to force the recordings into INCOMPLETE state
        is_valid, error = delete_vmr_pods(V2pc.SEGMENT_RECORDER)
        assert is_valid, error

        # Verify that each recording ends in INCOMPLETE state
        for i in range(total_rec):
            recording_id = recording.get_entry(i).RecordingId
            recording_pool.apply_async(
                validate_recordings.validate_recording_end_state,
                (recording_id, [constants.RecordingStatus.INCOMPLETE]),
                dict(web_service_obj=web_service_objects[i],
                     end_time=end_time),
                callback=queue.put)

        for i in range(total_rec):
            is_valid, error = queue.get()
            assert is_valid, error

        # The recordings have reached their end state by now; verify them in archive storage
        archive_helper.wait_for_archival(stream,
                                         recording.get_entry(0).RecordingId,
                                         Archive.ARCHIVE, Archive.COMPLETE)
        for i in range(total_rec):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response,
                Cos.ARCHIVE_STORAGE,
                Cos.RECORDING_STORED,
                rec_status='INCOMPLETE')
            assert is_valid, error

        # Playback using VLE
        for i in range(total_rec):
            recording_pool.apply_async(
                validate_recordings.validate_playback_using_vle,
                (recording.get_entry(i).RecordingId, ),
                callback=queue.put)

        for i in range(total_rec):
            is_valid, error = queue.get()
            assert is_valid, error

    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)
def test_er_008_009_recording_recovery_manifest_restart_(
        stream, name, copy_type):
    """
    UNIQUE  and COMMON copy Recording recovery
    """

    web_service_objects = []
    recording_pool = None
    recording = None
    total_recording = 20
    diff_start_time_recordings = 10
    same_start_time_recordings = 10
    service_name = V2pc.MANIFEST_AGENT
    namespace = Component.VMR

    try:
        # Back up the v2pc pod config, edit it, and restart the affected services
        is_valid, error = cleanup(redeploy_config_map,
                                  service_name,
                                  revert=True)
        assert is_valid, error

        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT,
                                                    batch_size='4')
        assert is_valid, error

        is_valid, error = verify_batch_size_update(service_name, namespace,
                                                   "4")
        assert is_valid, error

        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30,
                                              TimeFormat.TIME_FORMAT_MS,
                                              stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 150,
                                            TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(
            total_recordings=same_start_time_recordings,
            StartTime=start_time,
            EndTime=end_time,
            copyType=copy_type,
            StreamId=stream)

        for i in range(diff_start_time_recordings, same_start_time_recordings +
                       diff_start_time_recordings):
            start_time = utils.get_formatted_time((constants.SECONDS * 30) + i,
                                                  TimeFormat.TIME_FORMAT_MS,
                                                  stream)
            end_time = utils.get_formatted_time((constants.SECONDS * 150) + i,
                                                TimeFormat.TIME_FORMAT_MS,
                                                stream)
            rec_with_diff_time = recording_model.Recording(
                total_recordings=1,
                StartTime=start_time,
                EndTime=end_time,
                copyType=copy_type,
                StreamId=stream)

            rec_with_diff_time.Entries[0].RecordingId = RecordingAttribute.RECORDING_ID_PREFIX + \
                                                        rec_with_diff_time.RequestId + '_' + str(i)
            recording.Entries.append(rec_with_diff_time.get_entry(0))
        last_recording_id = rec_with_diff_time.Entries[0].RecordingId

        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(
                notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()

        LOGGER.debug("Recording instance created=%s", recording.serialize())

        #Sending recording request to create recording
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)

        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(
                last_recording_id)
        start_time = utils.get_parsed_time(
            response[0][RecordingAttribute.START_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((start_time - current_time),
                                           constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        # Verify that every recording has reached STARTED state
        recording_pool = mp_pool.ThreadPool()
        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.STARTED,
                 wait_time),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        # Restart the manifest agent to exercise recording recovery
        is_valid, error = delete_vmr_pods(V2pc.MANIFEST_AGENT)
        assert is_valid, error

        response = rio.find_recording(last_recording_id).json()
        if not response:
            return False, ValidationError.RECORDING_RESPONSE_EMPTY.format(
                last_recording_id)
        end_time = utils.get_parsed_time(
            response[0][RecordingAttribute.END_TIME][:-1])

        current_time = datetime.datetime.utcnow()
        wait_time = utils.add_time_to_secs((end_time - current_time),
                                           constants.SECONDS)
        if wait_time < 0:
            wait_time = 0

        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_notification,
                (web_service_objects[i], constants.RecordingStatus.COMPLETE,
                 wait_time),
                callback=queue.put)

        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error

        #Verifying recording in storage
        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)

            assert is_valid, error

        # Verifying archive storage and copy counts
        archive_helper.wait_for_archival(stream,
                                         recording.get_entry(0).RecordingId,
                                         Archive.ARCHIVE, Archive.COMPLETE)
        for i in range(total_recording):
            response = rio.find_recording(
                recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(
                response, Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error

            is_valid, error = validate_storage.validate_copy_count(
                response, Cos.ARCHIVE_STORAGE)
            assert is_valid, error

    finally:
        is_valid, error = cleanup(redeploy_config_map,
                                  service_name,
                                  revert=True)
        assert is_valid, error

        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            if web_service_obj:
                web_service_obj.stop_server()

        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)