def test_rtc9734_tc_er_010_playback_redirect_mode(stream, name, copy_type):
    """
    Playback of Active and Archive recording(UNIQUE and COMMON) with DO(Dash Origin) redirect enabled
    """
    recording = None
    web_service_obj = None
    try:
        # Schedule a 30-second recording starting 30 seconds from now.
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream,
                                              copyType=copy_type)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())

        ok, err = validate_common.validate_http_response_status_code(
            a8.create_recording(recording), requests.codes.no_content)
        assert ok, err
        ok, err = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert ok, err

        # The completed recording must be in active storage and playable.
        ok, err = validate_storage.validate_recording_in_storage(
            rio.find_recording(recording_id).json(), Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
        assert ok, err
        ok, err = validate_recordings.validate_playback(recording_id)
        assert ok, err

        if copy_type == RecordingAttribute.COPY_TYPE_UNIQUE:
            # UNIQUE copies get archived: verify archive storage and replay.
            archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.IN_PROGRESS)
            ok, err = validate_storage.validate_recording_in_storage(
                rio.find_recording(recording_id).json(), Cos.ARCHIVE_STORAGE, Cos.RECORDING_STORED)
            assert ok, err
            ok, err = validate_recordings.validate_playback(recording_id)
            assert ok, err
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9752_tc_rec_004_past_start_time_current_end_time(stream):
    """
    Create a recording with past start time and current end time
    """
    recording = None
    web_service_obj = None
    try:
        # Start time is 120 seconds in the past; omitting EndTime makes the
        # end time "now".
        start_time = utils.get_formatted_time(constants.SECONDS * -120, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        # Earliest point from which segments can realistically exist for this
        # back-dated recording.
        minimum_start_time = datetime.datetime.utcnow() + datetime.timedelta(0, constants.MINIMUM_SEGMENT_TIME_DIFF)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, status = validate_recordings.validate_recording(recording_id, web_service_obj, minimum_start_time)
        assert is_valid, status
        # Playback only makes sense if the recording actually completed.
        if status == constants.RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback(recording_id)
            assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_create_3hr_recording_ipdvrtests_64(channel, name, copy_type):
    """
    JIRA_URL : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-64
    DESCRIPTION : Create 3 hr recording
    #Partially automated Skipped step(s) : Step 3 - No errors seen in MA/SR pods
    """
    stream = channel
    web_service_obj = None
    recording = None
    try:
        # STEP 1 - Create UC and CC recording with longer duration ~3hr
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 10830, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        # STEP 2 - Verify recording state is complete
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        # Find recording.
        # BUGFIX: the old "return False, ..." made pytest silently PASS on an
        # empty rio response; assert so the failure is reported.
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio : %s", response[0]['Status'])
        # Playback and Validate Playback
        LOGGER.info("Playback Recording")
        LOGGER.info("Recording ID : %s", recording_id)
        is_valid, error = validate_recordings.validate_playback(recording_id)
        LOGGER.info("Playback valid : %s", is_valid)
        assert is_valid, error
        # STEP 4 - memsql table has correct info for UC and CC
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        pprint(response, width=1)
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9778_tc_arc_008_delete_after_archive_complete(stream):
    """
    Create a recording with copy type as UNIQUE and delete it after archival completes
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
        # Wait until archival has fully completed before deleting.
        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.COMPLETE)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error
        # Delete the archived recording and verify it is really gone.
        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording_deletion(recording_id)
        assert is_valid, error
        # Deletion verified — skip the redundant delete in the cleanup path.
        recording = None
    finally:
        # BUGFIX: guard cleanup — previously raised AttributeError when setup
        # failed before these objects existed, and re-deleted the recording.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9816_tc_rec_007_update_stream_id_before_recording_start(stream):
    """
    Create a recording and update the stream ID before the recording starts
    """
    recording = None
    web_service_obj = None
    try:
        # Start 120s out so there is time to update the stream ID first.
        start_time = utils.get_formatted_time(constants.SECONDS * 120, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 150, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        time.sleep(constants.SECONDS * 60)
        # Pick any other configured stream to switch the recording over to.
        stream_list_rem = [x for x in STREAM_ID_LIST if x != stream]
        assert stream_list_rem, ValidationError.STREAM_NOT_CONFIGURED
        recording.get_entry(0).StreamId = stream_list_rem[0]
        # Update the previously created recording with the new stream ID
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_streams.validate_stream_id(recording_id, stream_list_rem[0])
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_des_016_vmr_ntp_out_of_sync():
    """
    Point the VMR at an out-of-sync NTP server while a recording is in
    progress and verify the recording still completes, is stored and
    plays back; the original NTP server is restored afterwards.
    """
    LOGGER.info("vmr_ntp_out_of_sync test case...")
    recording = None
    web_service_obj = None
    # NOTE(review): timeout is read from an env var keyed by the stream id —
    # looks odd; confirm the intended environment variable name.
    timeout = int(os.environ.get(STREAM_ID))
    ntp_server_v2pc = CONFIG_INFO[Component.V2PC][Component.NTP]
    india_ntp = '1.in.pool.ntp.org'
    ntp_synchronization_time = 300  # seconds to let ntp converge after a change
    # Initiate the recording
    try:
        # BUGFIX: these calls previously referenced the undefined name
        # 'stream' (NameError) — this test has no parameters and uses the
        # module-level STREAM_ID everywhere else.
        start_time = utils.get_formatted_time((constants.SECONDS * 30) + timeout, TimeFormat.TIME_FORMAT_MS,
                                              STREAM_ID)
        end_time = utils.get_formatted_time((constants.SECONDS * 90) + timeout, TimeFormat.TIME_FORMAT_MS,
                                            STREAM_ID)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=STREAM_ID)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        time.sleep(40)
        # Switch VMR to a different (out-of-sync) ntp server mid-recording.
        cmd = "sed -i -E 's/" + ntp_server_v2pc + "/" + india_ntp + "/g' /etc/ntp.conf"
        update_vmr_ntp_server(cmd)
        time.sleep(ntp_synchronization_time)
        # Recording must still complete, be stored and play back.
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        LOGGER.info(is_valid)
        LOGGER.info(error)
        assert is_valid, error
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        # revert back the ntp to the original value in vmr
        cmd = "sed -i -E 's/" + india_ntp + "/" + ntp_server_v2pc + "/g' /etc/ntp.conf"
        update_vmr_ntp_server(cmd)
        time.sleep(ntp_synchronization_time)
        # BUGFIX: guard cleanup — these may be None if setup failed early.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
        # Check whether all the components are synchronized.
        times_are_synchronized, error = test_setup.are_times_synchronized()
        if not times_are_synchronized:
            pytest.fail(error)
def test_rtc9750_tc_rec_002_future_start_time_future_end_time_common(stream):
    """
    Create a recording with future start time, future end time and COMMON copy type
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        response = rio.find_recording(recording_id).json()
        # COMMON copy: exactly one copy (index 0) stored, no second copy.
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                         Cos.RECORDING_NOT_STORED, 1)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9737_tc_er_013_vertical_grouping(stream, name, copy_type):
    """
    Vertical Grouping - Enable vertical grouping on all profiles in MA.
    """
    web_service_obj = None
    recording = None
    service_name = V2pc.MANIFEST_AGENT
    stream_name = nsa.get_stream(stream).json()[0][constants.STREAM_NAME]
    profile_data = v2pc.get_all_stream_profile_data(stream_name)
    assert len(profile_data) >= 3, "Vertical grouping required minimum 3 profile for a stream"
    try:
        # Taking backup of v2pc pod config info and editing the config and then restarting the services
        is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        assert is_valid, error
        is_valid, error = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, vertical_grouping="*")
        assert is_valid, error
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        is_valid, error = verify_vertical_grouping(recording_id)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        # BUGFIX: previously the revert was asserted BEFORE stopping the web
        # service / deleting the recording, so a failed revert leaked both and
        # masked the original test failure. Nest the cleanup so the server and
        # recording are always released, then surface the revert result.
        try:
            # Revert back the v2pc config changes
            is_valid, error = cleanup(redeploy_config_map, service_name, revert=True)
        finally:
            if web_service_obj:
                web_service_obj.stop_server()
            if recording:
                response = a8.delete_recording(recording)
                LOGGER.debug("Recording clean up status code=%s", response.status_code)
        assert is_valid, error
def test_rtc9773_tc_arc_003_common(stream):
    """
    Create a recording with copy type as COMMON and check whether archival is happening or not
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream,
                                              copyType=copy_type)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        # Archival should kick in for COMMON copies as well.
        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.IN_PROGRESS)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_sanity_hybrid_copy(stream, name, copy_type):
    """
    Schedule UNIQUE and COMMON copy on same channel and verify the recording and playback.
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 80, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream,
                                              copyType=copy_type)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        # After archival completes the copy must have moved out of active
        # storage into archive storage, and still play back.
        archive_helper.wait_for_archival(stream, recording_id, Archive.ARCHIVE, Archive.COMPLETE)
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                         Cos.RECORDING_NOT_STORED)
        assert is_valid, error
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ARCHIVE_STORAGE,
                                                                         Cos.RECORDING_STORED)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_ply_lang(name, stream, audio_lang):
    """
    Playback recording using different audio languages like eng, fre and spa and validate playback
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        # Drive a single vle client requesting the specified audio language.
        vle_request_params = {}
        LOGGER.debug("Configuring vle to playback with clients=%s and audio language=%s", 1, audio_lang)
        vle_request_params[Vle.CLIENTS_NUM_MAX] = 1
        vle_request_params[Vle.AUDIO_LANGUAGE] = audio_lang
        is_valid, error = validate_recordings.validate_playback(recording_id,
                                                                VLE_REQUEST_PARAMS=vle_request_params)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_sanity_rtc10065_private_copy(stream):
    """
    Schedule Private copy and verify the recording and playback.
    """
    recording = None
    web_service_obj = None
    # This sanity case always targets the dedicated private-copy stream,
    # regardless of the fixture value.
    stream = str(private_copy_stream)
    try:
        # 50-second recording starting 30 seconds from now.
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 80, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        entry = recording.get_entry(0)
        recording_id = entry.RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        entry.UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())

        ok, err = validate_common.validate_http_response_status_code(
            a8.create_recording(recording), requests.codes.no_content)
        assert ok, err
        ok, err = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert ok, err

        # The recording must land in active storage and play back.
        ok, err = validate_storage.validate_recording_in_storage(
            rio.find_recording(recording_id).json(), Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
        assert ok, err
        ok, err = validate_recordings.validate_playback(recording_id)
        assert ok, err
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_ply_stream_style(name, stream, stream_style):
    """
    Playback vle using multiple/single bit rates and using different stream styles
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        vle_request_params = {}
        LOGGER.debug("Configuring vle to playback with clients=%s and stream style=%s", 1, stream_style)
        vle_request_params[Vle.CLIENTS_NUM_MAX] = 1
        vle_request_params[Vle.STREAM_STYLE] = stream_style
        if Vle.STREAM_STYLE_DEFAULT == stream_style:
            # Default style downloads a single bitrate, so pick one real
            # video profile from the recording's m3u8.
            is_valid, msg = utils.get_video_profiles_from_m3u8(recording_id)
            assert is_valid, msg
            # If valid, msg contains the list of video profiles
            assert msg, ValidationError.VIDEO_PROFILES_NOT_FOUND.format(recording_id)
            vle_request_params[Vle.DOWNLOAD_BITRATE] = msg[0]
        is_valid, error = validate_recordings.validate_playback(recording_id,
                                                                VLE_REQUEST_PARAMS=vle_request_params)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9771_tc_ply_009_playback_subtitles(stream):
    """
    Playback recording with subtitles and validate playback in Direct Mode
    """
    recording = None
    web_service_obj = None
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        # Exercise the subtitle call flow in VMR direct mode.
        vle_request_params = {}
        LOGGER.debug("Configuring vle to playback with subtitle in VMR direct mode")
        vle_request_params[Vle.VLE_CALL_FLOW_KEY] = Vle.VLE_CALL_FLOW_SUBTITLE
        is_valid, error = validate_recordings.validate_playback(recording_id,
                                                                VLE_REQUEST_PARAMS=vle_request_params)
        assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9753_tc_rec_006_current_start_time_future_end_time(stream):
    """
    Create a recording with current start time and future end time
    """
    recording = None
    web_service_obj = None
    try:
        # Omitting StartTime makes the recording start "now".
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, status = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, status
        # Playback only makes sense if the recording actually completed.
        if status == constants.RecordingStatus.COMPLETE:
            is_valid, error = validate_recordings.validate_playback(recording_id)
            assert is_valid, error
    finally:
        # BUGFIX: guard cleanup — either object may still be None if setup
        # failed early, which previously raised AttributeError in finally.
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_rec_033_015_(total_recording, stream, name, copy_type):
    """
    Create multiple recordings with copy type as COMMON
    """
    web_service_objects = []
    recording_pool = None
    recording = None
    try:
        LOGGER.info("total recording : %s", total_recording)
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recording, StartTime=start_time,
                                              EndTime=end_time, copyType=copy_type, StreamId=stream)
        # One notification web service per recording entry.
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        # Validate all recordings concurrently; results funnel through the queue.
        recording_pool = mp_pool.ThreadPool(processes=total_recording)
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_recording,
                                       (recording.get_entry(i).RecordingId, web_service_objects[i]),
                                       callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error
        for i in range(total_recording):
            response = rio.find_recording(recording.get_entry(i).RecordingId).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE,
                                                                             Cos.RECORDING_STORED)
            assert is_valid, error
        # time.sleep(20)  # THIS SLEEP CAUSES THE TEST TO FAIL IF THE ARCHIVE TIME IS SET TO 4 MINUTES
        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_playback(recording.get_entry(i).RecordingId)
            assert is_valid, error
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            web_service_obj.stop_server()
        # BUGFIX: guard the delete — recording is None if building the model
        # failed, which previously raised in finally.
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9804_tc_des_011_mce_ni_packet_latency():
    """
    Introduce the latency on each packet on the outgoing MCE Interface, trigger a recording
    and verify if the recording is successful
    """
    ssh_client = None
    response = None
    web_service_obj = None
    start_duration = 30
    try:
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)
        for mce_node in mce_nodes:
            mce_data_out = mce_node[Interface.DATA_OUT]
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            # deleting the previously scheduled jobs by other test cases, in order not to tamper
            # with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)
            rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_out, mce_ip,
                                                                  destructive.MCE_JOB_IDS,
                                                                  constants.MINUTES * 10)
            des_cmd = DestructiveTesting.PACKET_LATENCY_OUTGOING_INTERFACE.format(
                mce_data_out, DestructiveTesting.PACKET_LATENCY)
            des_cmd = destructive_utils.get_outgoing_tc_cmd(mce_data_out, des_cmd)
            # expected outcome after the destructive commands are run
            expected_result = {DestructiveTesting.DELAY: DestructiveTesting.PACKET_LATENCY,
                               DestructiveTesting.DST: DestructiveTesting.NETWORK}
            is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, mce_data_out, des_cmd,
                                                                     expected_result)
            # BUGFIX: close this node's client before moving to the next one —
            # previously only the last client was closed (in finally), leaking
            # one SSH connection per additional MCE node.
            ssh_client.close()
            assert is_des_effective, error
        start_time = utils.get_formatted_time(constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS,
                                              STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * 90, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        response = destructive_utils.create_recording_des(start_time, end_time)
        web_service_obj = response[RecordingAttribute.WEB_SERVICE_OBJECT]
        recording_id = response[RecordingAttribute.RECORDING_ID]
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback(recording_id)
        assert is_valid, error
        # executing the revert command to undo the destructive commands
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            mce_data_out = mce_node[Interface.DATA_OUT]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip,
                                                                      rev_cmds[mce_ip], mce_data_out,
                                                                      destructive.MCE_JOB_IDS)
            else:
                rev_effective, error = destructive_utils.is_rev_effective(ssh_client, mce_data_out)
            # BUGFIX: same per-node close as above.
            ssh_client.close()
            assert rev_effective, error
        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        if ssh_client:
            ssh_client.close()
        if web_service_obj:
            web_service_obj.stop_server()
        if response:
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_HLS_Verify_codec_based_audio_filtering_STANDALONE_ipdvrtests_46(channel):
    """
    HLS : Verify codec based audio filtering (standalone VMR).

    Disables one audio codec in the playout-packager HLS publish template
    (edited directly in the service config map), restarts the VMR pods,
    records the stream, and verifies the filtered codec is absent from the
    playback m3u8 manifest. The config map is reverted in the finally block.
    """
    stream = channel
    web_service_obj = None
    recording = None
    try:
        service_name = "playout-packager"
        is_valid, config_file = vmr.fetch_config_file(service_name, mpe_config[Component.NAMESPACE])
        assert is_valid, config_file
        v2pc_config_path = os.path.dirname(config_file)
        config_file_name = os.path.basename(config_file)
        update_path = os.path.join(v2pc_config_path, VMR_SERVICES_RESTART.UPDATE_PATH)
        updated_config = os.path.join(update_path, config_file_name)

        audio_codecs = utils.get_audio_codecs(stream, V2PC_EXIST)
        assert audio_codecs, "No manifest response from the given mpd url"
        LOGGER.info("Audio Codecs available in selected Stream are : %s", audio_codecs)
        assert len(set(audio_codecs.values())) >= 2, "there is only one audio codec format available in the selected stream"

        # Filtering out one codec from the available default codecs
        filtered_codec = audio_codecs.items()[0][1]
        LOGGER.info("filter_codecs : %s", filtered_codec)
        Codec_disabled_payload = []
        for pid, codec in audio_codecs.items():
            if codec == filtered_codec:
                Codec_disabled_payload.append((pid, codec))
        LOGGER.info("Codec_disabled_payload : %s", Codec_disabled_payload)

        # BUG FIX: iterate with a distinct name ('pid_codec') instead of
        # reusing 'filtered_codec', which the original clobbered with a
        # (pid, codec) tuple, breaking the manifest check further below.
        push_payload = []
        for pid_codec in Codec_disabled_payload:
            push_payload.append({
                'action': 'disable',
                'pid': str(pid_codec[0]),
                'codec': str(pid_codec[1]).upper(),
                'type': 'audio',
                'default': 'false'
            })

        with open(os.path.join(v2pc_config_path, config_file_name), "r") as fp:
            dt = json.load(fp)
        workflows = json.loads(dt['data']['workflows.conf'])
        template = workflows.keys()[0]
        publish_templates = workflows[template]['assetResolver']['workflow'][0]['publishTemplates']
        for templates in publish_templates:
            if templates['name'] == 'HLS':
                if templates.has_key('streamConfiguration'):
                    LOGGER.info("Default Payload in HLS publish template : %s", templates['streamConfiguration'])
                    # Updating Payload
                    templates['streamConfiguration'] = []
                    templates['streamConfiguration'].extend(push_payload)
                    LOGGER.info("Updated Payload after codec filtering : %s", templates['streamConfiguration'])
        workflows[template]['assetResolver']['workflow'][0]['publishTemplates'] = publish_templates
        dt['data']['workflows.conf'] = json.dumps(workflows)
        with open(updated_config, 'w') as f:
            json.dump(dt, f, indent=4)

        # Apply the config with oc apply -f command
        redeploy_res, resp = vmr.redeploy_config_map(service_name, mpe_config[Component.NAMESPACE])
        assert redeploy_res, resp
        delete_pods, resp = vmr.delete_vmr_pods("All", mpe_config[Component.NAMESPACE])
        assert delete_pods, resp

        # Create recording
        LOGGER.info("Creating Recording")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        playback_url = utils.get_mpe_playback_url(recording_id, "hls")
        LOGGER.info("playback_url : %s", playback_url)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Find recording
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        # BUG FIX: the original did 'return False, msg' here, which pytest
        # treats as a pass; fail explicitly when rio returns nothing.
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio : %s", response[0]['Status'])

        # Playback and Validate Playback
        LOGGER.info("Playback Recording for the recording ID : %s", recording.get_entry(0).RecordingId)
        is_valid, error = validate_recordings.validate_playback(recording.get_entry(0).RecordingId)
        LOGGER.info("Validate recording : %s", is_valid)
        assert is_valid, error

        # Get Playback URL
        playback_url = utils.get_mpe_playback_url(recording_id)
        LOGGER.info("Playback_ulr ----- %s", playback_url)

        # Validate Filtered Codec Value in m3u8
        filtered_codecs_validation = []
        result = True
        codec_check = m3u8.load(playback_url)
        codec_check_data = codec_check.data
        LOGGER.info("m3u8 playback output :")
        LOGGER.info("codec_check_data : %s", codec_check_data)
        if (type(codec_check_data) == dict) and (codec_check_data.has_key('media')):
            filtered_codecs_validation = [media_data['codecs'] for media_data in codec_check_data['media'] if media_data['type'] == "AUDIO"]
        # BUG FIX: the original referenced the undefined name 'filter_codecs'
        # here (NameError at runtime); compare against the codec selected for
        # filtering, which 'filtered_codec' still holds after the fix above.
        if filtered_codec not in filtered_codecs_validation:
            message = "Codecs filtered successfully"
        else:
            message = "filtering not happened properly"
            result = False
        assert result, message
        LOGGER.info("Testcase passed with the message : %s", message)
    finally:
        LOGGER.info("Reverting default payload...")
        vmr.redeploy_config_map(service_name, mpe_config[Component.NAMESPACE], revert=True)
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9799_tc_ply_008_playback_pause_resume(stream):
    """
    Create a recording, pause and resume during playback and verify if playback was successful
    """
    recording = None
    web_service_obj = None
    # Pause after the 2nd segment, stay paused for 5 units (VLE trickmode args)
    pause_trigger = 2
    pause_duration = 5
    try:
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_COMMON
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # COMMON copy: exactly one stored copy expected in active storage
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
        assert is_valid, error
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED, 1)
        assert is_valid, error

        # Drive playback through VLE with a pause/resume trickmode sequence
        vle_request_params = {
            Vle.TRICKMODES: '{0},{1},{2},{3}'.format(Vle.TRICKMODE_PAUSE, pause_trigger, pause_duration, Vle.PAUSE_WRT_SEGMENT)
        }
        is_valid, error = validate_recordings.validate_playback(
            recording_id,
            VLE_REQUEST_PARAMS=vle_request_params,
            VALIDATOR_TYPE=vle_validators_configuration.PLAYBACK_VALIDATION_TRICKMODE)
        assert is_valid, error
    finally:
        # BUG FIX: guard cleanup — both names start as None, so a failure
        # before they are assigned raised AttributeError here and masked the
        # real test error (sibling tests in this file already guard).
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_tc_rec_active_unique_asset(stream, name, total_recording):
    """
    Bulk deletion of assets in Active Unique(20 requests)

    Schedules 'total_recording' UNIQUE-copy recordings, validates they are
    recorded and playable, bulk-deletes them, then verifies deletion and that
    playback of a deleted recording fails.
    """
    recording = None
    web_service_objects = []
    recording_pool = None
    recording_id_list = []
    try:
        queue = Queue.Queue()
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        recording = recording_model.Recording(total_recordings=total_recording, StartTime=start_time, EndTime=end_time, copyType=copy_type, StreamId=stream)
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_id_list.append(recording_id)
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        # Validate all recordings concurrently; results funnel through the queue
        recording_pool = mp_pool.ThreadPool(processes=total_recording)
        for i in range(total_recording):
            recording_pool.apply_async(
                validate_recordings.validate_recording,
                (recording.get_entry(i).RecordingId, web_service_objects[i]),
                callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error
        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
            assert is_valid, error
        time.sleep(20)
        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_playback(recording.get_entry(i).RecordingId)
            assert is_valid, error

        # Bulk delete, then confirm deletion and that playback now fails
        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_recording_deletion(recording_id_list[i])
            assert is_valid, error
            is_valid, error = validate_recordings.validate_playback_using_vle(recording_id_list[i])
            assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(recording_id_list[i])
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        for web_service_obj in web_service_objects:
            web_service_obj.stop_server()
        # BUG FIX: guard with 'if recording' like the sibling tests — the
        # original unconditionally deleted even when scheduling never happened.
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_HLS_Verify_codec_based_audio_filtering_V2PC_ipdvrtests_46(channel):
    """
    JIRA ID : IPDVRTESTS-46
    TITLE : "HLS : Verify codec based audio filtering"
    STEPS : Create a 30mins recording with future start time.
            Playback using the HLS publish template configured to filter based on audio codecs.
            Verify output of manifest curl to match the filtering configured in publish template
    """
    stream = channel
    web_service_obj = None
    recording = None
    Actual_publish_template = None
    try:
        audio_codecs = utils.get_audio_codecs(stream, V2PC_EXIST)
        assert audio_codecs, "No manifest response from the given mpd url"
        LOGGER.info("audio_codecs : %s", audio_codecs)
        assert len(set(audio_codecs.values())) >= 2, "there is only one audio codec format available in the selected stream"

        # Filtering out one codec from the available default codecs:
        filtered_codec = audio_codecs.items()[-1][1]
        LOGGER.info("filtered_codec : %s", filtered_codec)
        Codec_disabled_payload = []
        for pid, codec in audio_codecs.items():
            if codec == filtered_codec:
                Codec_disabled_payload.append((pid, codec))
        LOGGER.info("Codec_disabled_payload : %s", Codec_disabled_payload)

        # Getting Default Payload from publish template
        Actual_publish_template = v2pc_helper.get_publish_template()
        payload = Actual_publish_template.json()
        LOGGER.info("Default Payload in HLS publish template : %s", payload)
        codec_default_values = [x['codec'] for x in payload['properties']['streamConfiguration']]
        codec_state = [x['action'] for x in payload['properties']['streamConfiguration']]
        LOGGER.info("Default Codec values in HLS publish template : %s", codec_default_values)
        LOGGER.info("Codec States in HLS publish template : %s", codec_state)

        # Creating New payload with filtered codec.
        # BUG FIX: use a distinct loop variable ('pid_codec') — the original
        # reused 'filtered_codec' and clobbered the codec string with a
        # (pid, codec) tuple, which made the final manifest check below
        # vacuously pass (a tuple is never in a list of codec strings).
        payload['properties']['streamConfiguration'] = []
        for pid_codec in Codec_disabled_payload:
            payload['properties']['streamConfiguration'].append({
                'action': 'disable',
                'pid': str(pid_codec[0]),
                'codec': str(pid_codec[1]).upper(),
                'type': 'audio',
                'default': 'false'})
        LOGGER.info("Updated Payload in HLS publish template (after codec filtering) : %s", payload)
        codec_updated_values = [x['codec'] for x in payload['properties']['streamConfiguration']]
        codec_state = [x['action'] for x in payload['properties']['streamConfiguration']]
        LOGGER.info("Filtered Codec values in HLS publish template : %s", codec_updated_values)
        LOGGER.info("Codec States in HLS publish template : %s", codec_state)

        # Updating HLS Publish template with (Filtered codec) payload
        Updated_publish_template = v2pc_helper.put_publish_template(payload)
        if Updated_publish_template == True:
            LOGGER.info("\nPayload Updated")
        else:
            LOGGER.info("\nPayload Update failed")

        # Restarting the workflow
        LOGGER.info("Restarting the workflow...")
        result, response = v2pc_helper.restart_media_workflow(gen_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc_helper.waits_till_workflow_active(gen_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        # Creating Recording with Codec filtering
        LOGGER.info("Creating recording with codec filtering")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        LOGGER.info("Validate Recording for %s", recording_id)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Find recording
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        # BUG FIX: 'return False, msg' silently passes under pytest; assert instead.
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio : %s", response[0]['Status'])

        # Playback and Validate Playback
        LOGGER.info("Playback Recording of %s", recording.get_entry(0).RecordingId)
        LOGGER.info("Recording ID : %s", recording.get_entry(0).RecordingId)
        is_valid, error = validate_recordings.validate_playback(recording.get_entry(0).RecordingId)
        LOGGER.info("Validate recording : %s", is_valid)
        assert is_valid, error

        # Get Playback URL
        playback_url = utils.get_mpe_playback_url(recording_id)
        LOGGER.info("Playback_ulr ----- %s", playback_url)

        # Validate Filtered Codec Value in m3u8
        filtered_codecs_validation = []
        result = True
        codec_check = m3u8.load(playback_url)
        codec_check_data = codec_check.data
        LOGGER.info("m3u8 playback output:")
        LOGGER.info("codec_check_data : %s", codec_check_data)
        if (type(codec_check_data) == dict) and (codec_check_data.has_key('media')):
            filtered_codecs_validation = [media_data['codecs'] for media_data in codec_check_data['media'] if media_data['type'] == "AUDIO"]
        # 'filtered_codec' is still the codec string selected above, so this
        # check now genuinely verifies the codec is absent from the manifest.
        if filtered_codec not in filtered_codecs_validation:
            message = "Codecs filtered successfully"
        else:
            message = "filtering not happened properly"
            result = False
        assert result, message
        LOGGER.info("Testcase passed with the message : %s", message)
    finally:
        if Actual_publish_template:
            # Reverting Publish template (fresh .json() call returns the
            # original, unmodified payload)
            LOGGER.info("Reverting default payload ----- %s", Actual_publish_template.json())
            Updated_publish_template = v2pc_helper.put_publish_template(Actual_publish_template.json())
            if Updated_publish_template == True:
                LOGGER.info("Payload reverted")
            else:
                LOGGER.info("Payload reverting failed")
            # Restarting the workflow
            LOGGER.info("Restarting the workflow")
            v2pc_helper.restart_media_workflow(gen_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9740_tc_er_016_horizontal_grouping(stream, name, copy_type):
    """
    Enable horizontal grouping with a 12s window in the manifest-agent
    config, record the stream, then verify grouping and playback.
    The config change is reverted on exit.
    """
    web_service_obj = None
    recording = None
    grouping_duration = '12s'
    try:
        # Back up the v2pc pod config, apply the manifest-agent edit, and
        # restart the services so the new grouping window takes effect.
        ok, err = cleanup(redeploy_config_map, V2pc.MANIFEST_AGENT, revert=True)
        assert ok, err
        ok, err = v2pc_edit_manifest_config(V2pc.MANIFEST_AGENT, horizontal_grouping=grouping_duration)
        assert ok, err

        # Schedule a 30s recording starting 30s from now.
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type, StreamId=stream)
        entry = recording.get_entry(0)
        rec_id = entry.RecordingId
        web_service_obj = notification_utils.get_web_service_object(rec_id)
        entry.UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())

        create_resp = a8.create_recording(recording)
        ok, err = validate_common.validate_http_response_status_code(create_resp, requests.codes.no_content)
        assert ok, err

        # Recording must complete, show the configured grouping, and play back.
        for check in (
                lambda: validate_recordings.validate_recording(rec_id, web_service_obj),
                lambda: verify_horizontal_grouping(rec_id, grouping_duration),
                lambda: validate_recordings.validate_playback(rec_id)):
            ok, err = check()
            assert ok, err
    finally:
        # Restore the original v2pc config before releasing test resources.
        ok, err = cleanup(redeploy_config_map, V2pc.MANIFEST_AGENT, revert=True)
        assert ok, err
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            del_resp = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", del_resp.status_code)
def test_rtc9801_tc_des_008_mce_ni_block_pending_recording():
    """
    Block traffic on the outgoing MCE interface, trigger a recording(4 minutes) and unblock the interface after 2 minutes
    Check if the recording is INCOMPLETE. Verify the playback of recording
    """
    ssh_client = None
    response = None
    # Recording window relative to now: starts at +30s, ends at +270s (4 min).
    start_duration = 30
    end_duration = 270
    try:
        # Per-node revert commands, keyed by MCE IP; None means the revert was
        # left to the scheduled job rather than run explicitly later.
        rev_cmds = {}
        mce_nodes = v2pc.get_app_worker_nodes(MCE_INSTANCE, COMPONENT_NAME)
        for mce_node in mce_nodes:
            mce_data_out = mce_node[Interface.DATA_OUT]
            mce_ip = mce_node[Component.IP]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            # deleting the previously scheduled jobs by other test cases, in order not to tamper with the current test case
            destructive_utils.delete_scheduled_job(COMPONENT_NAME, ssh_client, mce_ip, destructive.MCE_JOB_IDS)
            # Build the tc command that blocks (100% packet loss) the outgoing
            # data interface.
            des_cmd = DestructiveTesting.PACKET_LOSS_OUTGOING_INTERFACE.format(mce_data_out, DestructiveTesting.PACKET_LOSS_BLOCK)
            des_cmd = destructive_utils.get_outgoing_tc_cmd(mce_data_out, des_cmd)
            if mce_node[Interface.DATA_OUT] != mce_node[Interface.MGMT]:
                # Data and management are separate interfaces: safe to verify
                # the destruction took effect over the management connection.
                rev_cmds[mce_ip] = destructive_utils.schedule_rev_cmd(ssh_client, mce_data_out, mce_ip, destructive.MCE_JOB_IDS, constants.MINUTES * 10)
                expected_result = {DestructiveTesting.LOSS: DestructiveTesting.PACKET_LOSS_BLOCK, DestructiveTesting.DST: DestructiveTesting.NETWORK}
                is_des_effective, error = destructive_utils.exec_des_cmd(ssh_client, mce_data_out, des_cmd, expected_result)
                assert is_des_effective, error
            else:
                # Data interface doubles as management: blocking it severs our
                # own SSH session, so schedule an automatic revert in 2 minutes
                # and fire the command without verification.
                destructive_utils.schedule_rev_cmd(ssh_client, mce_data_out, mce_ip, destructive.MCE_JOB_IDS, constants.MINUTES * 2)
                rev_cmds[mce_ip] = None
                LOGGER.info("Executing the command=%s to cause destruction in the component", des_cmd)
                ssh_client.exec_command(des_cmd)
        start_time = utils.get_formatted_time(constants.SECONDS * start_duration, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        end_time = utils.get_formatted_time(constants.SECONDS * end_duration, TimeFormat.TIME_FORMAT_MS, STREAM_ID)
        # response bundles the recording, its ID, and the notification server
        # (see RecordingAttribute keys used below).
        response = destructive_utils.create_recording_des(start_time, end_time)
        # Wait for the scheduled recording window to elapse entirely.
        time.sleep(end_duration + constants.TIME_DELTA)
        for mce_node in mce_nodes:
            mce_ip = mce_node[Component.IP]
            mce_data_out = mce_node[Interface.DATA_OUT]
            ssh_client = utils.get_ssh_client(COMPONENT_NAME, COMPONENT_USERNAME, component_ip=mce_ip)
            if rev_cmds[mce_ip]:
                # Explicitly run the revert that was scheduled earlier.
                rev_effective, error = destructive_utils.exec_rev_cmd(COMPONENT_NAME, ssh_client, mce_ip, rev_cmds[mce_ip], mce_data_out, destructive.MCE_JOB_IDS)
            else:
                # Revert was left to the 2-minute scheduled job; just confirm
                # the interface is clean again.
                rev_effective, error = destructive_utils.is_rev_effective(ssh_client, mce_data_out)
            assert rev_effective, error
        # Traffic was blocked for part of the window, so the recording must
        # end in INCOMPLETE state.
        is_valid, rec_error = validate_recordings.validate_recording_end_state(response[RecordingAttribute.RECORDING_ID], [RecordingStatus.INCOMPLETE], web_service_obj=response[RecordingAttribute.WEB_SERVICE_OBJECT])
        recording_response = rio.find_recording(response[RecordingAttribute.RECORDING_ID]).json()
        LOGGER.debug("Recording response=%s", recording_response)
        assert is_valid, rec_error
        # validate playback to check if available segments are recorded
        is_valid, error = validate_recordings.validate_playback(response[RecordingAttribute.RECORDING_ID])
        assert is_valid, error
        # running sanity test to check if the setup is back to normal after reverting the commands
        test_rtc9723_tc_rec_001_future_start_time_future_end_time(STREAM_ID)
    finally:
        # NOTE(review): only the last node's ssh_client is closed here if the
        # loop opened several — confirm get_ssh_client reuses/pools sessions.
        if ssh_client:
            ssh_client.close()
        if response:
            response[RecordingAttribute.WEB_SERVICE_OBJECT].stop_server()
            response = a8.delete_recording(response[RecordingAttribute.RECORDING])
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_rtc9786_bulk_update_end_time_before_recording_start_unique(total_recording, stream, name, copy_type):
    """
    TC9786 : Bulk update of recording requests of end time before recording(20 requests ) starts unique copy
    """
    recording = None
    web_service_objects = []
    recording_pool = None
    recording_id_list = []
    try:
        queue = Queue.Queue()
        # Initial schedule: +30s .. +70s from now.
        start_time = utils.get_formatted_time(constants.SECONDS * 30, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time(constants.SECONDS * 70, TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(total_recordings=total_recording, StartTime=start_time, EndTime=end_time, StreamId=stream, copyType=copy_type)
        # One notification web service per scheduled entry.
        for i in range(total_recording):
            recording_id = recording.get_entry(i).RecordingId
            recording_id_list.append(recording_id)
            web_service_objects.append(notification_utils.get_web_service_object(recording_id))
            recording.get_entry(i).UpdateUrl = web_service_objects[i].get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        recording_pool = mp_pool.ThreadPool(processes=total_recording)
        # Before any recording starts, shorten every entry's end time to +60s.
        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            LOGGER.debug("Response=%s", response)
            # Trailing timezone designator is stripped for logging only.
            end_time = response[0][RecordingAttribute.END_TIME][:-1]
            LOGGER.debug("Scheduled end time of recording=%s is %s", recording_id_list[i], end_time)
            recording.get_entry(i).EndTime = utils.get_formatted_time(constants.SECONDS * 60, TimeFormat.TIME_FORMAT_MS, stream)
            LOGGER.debug("Updated end time of recording=%s to %s", recording_id_list[i], recording.get_entry(i).EndTime)
        # Update the recording with the updated end time
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        # Validating whether the updated end time was populated or not
        for i in range(total_recording):
            response = rio.find_recording(recording_id_list[i]).json()
            LOGGER.debug("Response=%s", response)
            is_valid, error = validate_recordings.validate_time(response, recording.get_entry(i).EndTime, RecordingAttribute.END_TIME)
            assert is_valid, error
        # Validate all recordings concurrently; each worker posts its
        # (is_valid, error) result onto the queue via the callback.
        for i in range(total_recording):
            recording_pool.apply_async(validate_recordings.validate_recording, (recording.get_entry(i).RecordingId, web_service_objects[i]), callback=queue.put)
        for i in range(total_recording):
            is_valid, error = queue.get()
            assert is_valid, error
        for i in range(total_recording):
            is_valid, error = validate_recordings.validate_playback(recording.get_entry(i).RecordingId)
            assert is_valid, error
    finally:
        if recording_pool:
            recording_pool.close()
            recording_pool.join()
        if len(web_service_objects):
            for web_service_obj in web_service_objects:
                web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
def test_dash_wv_pid_based_audio_filtering_ipdvrtests_143(channel):
    """
    JIRA_URL: https://jira01.engit.synamedia.com/browse/IPDVRTESTS-143
    DESCRIPTION: Filtering Audio PID in dash templated and playback with encryption
    """
    import copy  # local import: needed for the deepcopy bug fix below

    package_format = ''
    key_profile_ref = ''
    template_name = None
    pubished_template_data = None
    generic_conf = utils.get_spec_config()
    web_service_obj = None
    recording = None
    metadata = None
    try:
        audio_pids = utils.get_audio_pids(channel, V2PC_EXIST)
        LOGGER.info("Available Audio PIDs : {0}".format(audio_pids))
        assert len(audio_pids.keys()) >= 2, ValidationError.NO_AUDIO_PID

        # Pick the last (pid, codec) pair as the one to disable
        filter_pids = audio_pids.items()[-1]
        LOGGER.info("filtered pids {0}".format(filter_pids))
        filter_pid_payload = {
            'action': 'disable',
            'pid': str(filter_pids[0]),
            'codec': 'DD/' + str(filter_pids[1]).upper(),
            'type': 'audio'
        }
        LOGGER.info("filtering %s pid from audio pids " % (str(filter_pids[0])))
        LOGGER.info("Audio pids that is available in manifest ")

        # Locate the DASH Widevine publish template among all v2pc templates
        templates_list_resp = v2pc_api.get_all_v2pc_templates()
        assert templates_list_resp.status_code == requests.codes.ok, ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(templates_list_resp.status_code, templates_list_resp.reason, templates_list_resp.url)
        templt_list = json.loads(templates_list_resp.content)
        for templt in templt_list:
            if templt.get('properties'):
                key_profile_ref = templt['properties'].get('keyProfileRef', '').split('.')[-1]
                package_format = templt['properties'].get('packageFormat', "")
                if (key_profile_ref == V2pc.DASH_TEMPLATE_KEY_PROFILE) and (package_format == V2pc.DASH_TEMPLATE_PACKAGE_FORMAT):
                    template_name = templt['name']
                    pubished_template_data = templt
                    break
        assert key_profile_ref and package_format, ValidationError.DASH_WV_TEMPLATE_UNAVAILABLE
        LOGGER.info("Published Template Data {0}".format(pubished_template_data))

        # Keep 'metadata' as the pristine template body (minus server-managed
        # keys) so the finally block can restore it.
        keys_to_remove = ["externalId", "modified", "sysMeta", "transactionId", "type"]
        metadata = dict([(k, v) for k, v in pubished_template_data.items() if k not in keys_to_remove])
        LOGGER.info("Modified metadata : {0}".format(metadata))
        # BUG FIX: the original used metadata.copy() (shallow); mutating the
        # nested 'properties' dict then also corrupted 'metadata', so the
        # revert in finally pushed the FILTERED config instead of the original.
        metadata_modified = copy.deepcopy(metadata)
        metadata_modified['properties']['streamConfiguration'] = []
        metadata_modified['properties']['streamConfiguration'].append(filter_pid_payload)

        # Filtering publish templated with PIDs
        LOGGER.info("Payload to publish template : {0}".format(metadata_modified))
        update_template = v2pc_helper.put_publish_template(metadata_modified, template=template_name)
        assert update_template, "Unable to update the published template with renamed segment"

        # Restart the workflow so the template change takes effect
        result, response = v2pc_helper.restart_media_workflow(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc_helper.waits_till_workflow_active(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        # Step 1: Create a recording for 30 mins..
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, channel)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, channel)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=channel)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Step 2: Playback using DASH publish template configured to filter based on audio PIDs
        LOGGER.info("Playback Recording with Dash Widevine")
        is_valid, error = validate_recordings.validate_playback(recording_id, playback_types=[PlaybackTypes.DASH_WV, ])
        assert is_valid, error

        filtered_pids2 = []
        result = True
        # Step 3: Verify output of manifest curl to match filtering configured in publish template
        playback_url = utils.get_mpe_playback_url(recording_id, playback_type=PlaybackTypes.DASH_WV)
        resp = requests.get(playback_url)
        xml_val = utils.xml_dict(resp.content)
        LOGGER.info("DASH WV MPD Manifest details : {0}".format(xml_val))
        # Raw string + compiled once outside the loops (the original rebuilt
        # a non-raw pattern on every representation).
        audio_id_pattern = re.compile(r'audio_\d*')
        if xml_val["MPD"]["Period"]:
            for period in xml_val["MPD"]["Period"]:
                for adt_set in period["AdaptationSet"]:
                    if adt_set.has_key('contentType') and adt_set['contentType'] == Feed.AUDIO:
                        for rep in adt_set["Representation"]:
                            LOGGER.info("representation list {0}".format(rep))
                            # Guard against IndexError when an audio
                            # representation id carries no 'audio_<pid>' tag.
                            matches = audio_id_pattern.findall(rep["id"])
                            if matches:
                                pids_picked = matches[-1].replace("audio_", '')
                                if pids_picked:
                                    filtered_pids2.append(pids_picked)
        LOGGER.info("filtered_pids2 : {0}".format(filtered_pids2))
        # Pass only if some pids remain, fewer than the source offered, and
        # the disabled pid is not among them.
        if filtered_pids2 and (len(filtered_pids2) < len(audio_pids)) and (str(filter_pids[0]) not in filtered_pids2):
            message = "audio pids filtered successfully"
        else:
            message = "filtering not happened properly"
            result = False
        assert result, message
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            a8.delete_recording(recording)
            LOGGER.info("recording details destroyed.. ")
        if metadata:
            # Restore the original publish template and bounce the workflow
            update_template = v2pc_helper.put_publish_template(metadata, template=template_name)
            assert update_template, "Unable to revert the published template"
            result, response = v2pc_helper.restart_media_workflow(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
            assert result, response