def test_tc10367_restart_channel_when_recording(channel):
    """
    JIRA ID : IPDVRTESTS-58
    JIRA Link : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-58
    TC10367: Restart the channel during recording and then playback
    """
    stream = channel
    recording = None
    web_service_obj = None
    try:
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration()
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        LOGGER.debug("Stream Id : %s", stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        LOGGER.info("Recording Id : %s", recording_id)
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        # Restart the stream while the recording is in progress
        LOGGER.info("Restart Channel : %s", stream)
        is_valid, error = v2pc.restart_stream(stream, count=3)
        assert is_valid, error

        # Verify the recording still completes after the restart
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)

def test_filter_video_based_on_rank_ipdvrtests_235(channel):
    """
    JIRA ID : IPDVRTESTS-235
    JIRA Link : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-235
    Description: DASH - Confirm MPE publish template to filter video based on rank
    """
    stream = channel
    recording = None
    metadata = None
    web_service_obj = None
    try:
        source_xml = utils.get_source_mpd_content(stream)
        bit_res = utils.get_bitrates_resolutions(source_xml)
        bitrates = bit_res.keys()
        assert len(bitrates) >= 2, "Not enough video profiles in the selected stream to filter"

        # Step 1: Configure variants in the MPE publish template to filter video based on rank
        push_payload = {
            "name": "default",
            "order": "rank",
            "selectivePublish": "true",
            "profileOrdering": [
                {"rank": "1"},
                {"rank": "2"}
            ]
        }
        generic_config = utils.get_spec_config()
        key_profile = generic_config[Component.WORK_FLOW][Component.KEY_PROFILE_DASH_WIDEVINE]
        response, published_template_data, template_name = v2pc_helper.get_publish_template_by_format(
            template_format=V2pc.DASH_TEMPLATE_PACKAGE_FORMAT, key_profile=key_profile)
        assert response, "Cannot find the template data for the given format/key profile"
        LOGGER.debug("Published Template Data : %s", published_template_data)

        keys_to_be_removed = V2pc.OTHER_KEYS
        metadata = dict([(key, value) for key, value in published_template_data.items()
                         if key not in keys_to_be_removed])
        LOGGER.debug("Modified metadata : %s", metadata)
        metadata_modified = metadata.copy()
        metadata_modified['properties']["variants"] = [push_payload, ]
        LOGGER.debug("Modified publish template : %s", metadata_modified)
        update_template = v2pc.put_publish_template(metadata_modified, template=template_name)
        assert update_template, "Unable to update the publish template with the rank-based variants"

        result, response = v2pc.restart_media_workflow(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc_helper.waits_till_workflow_active(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME], 120)
        assert result, response

        # Step 2: Create a 30 minute recording
        LOGGER.info("Creating Recording")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        playback_url = utils.get_mpe_playback_url(recording_id, PlaybackTypes.DASH_WV)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Step 3: Check the manifest for the configured variants via the playback URL
        mpd_res = requests.get(playback_url)
        res_bitrates = []
        result_dict = utils.xml_dict(mpd_res.content)
        for repre in result_dict['MPD']['Period'][0]['AdaptationSet']:
            if "Representation" in repre and repre.get("contentType") == 'video':
                for rate in repre["Representation"]:
                    if "video" in rate['id'] and int(rate['bandwidth']) not in res_bitrates:
                        res_bitrates.append(int(rate['bandwidth']))
        strip_value = len(push_payload["profileOrdering"]) - len(bitrates)
        if strip_value != 0:
            assert sorted(bitrates)[::-1][0:strip_value] == res_bitrates, \
                "Video bitrates are not filtered based on rank given in publish template"
        else:
            assert sorted(bitrates)[::-1] == res_bitrates, \
                "Video bitrates are not filtered based on rank given in publish template"
    finally:
        if metadata:
            LOGGER.info("Reverting the publish template changes")
            update_template = v2pc.put_publish_template(metadata, template=template_name)
            assert update_template, "Unable to revert the publish template"
            result, response = v2pc.restart_media_workflow(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
            assert result, response
            result, response = v2pc_helper.waits_till_workflow_active(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME], 120)
            assert result, response
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)

def test_delete_hot_recording_ipdvrtests_52(stream):
    """
    JIRA ID : IPDVRTESTS-52
    TITLE : "Delete Rec (Hot recording)"
    STEPS : Create a 30 min recording and, once the program has started, mark the recording
            for deletion. Check memsql to verify the erase time is set. Check the COS HTTP
            logs for deleted segments. Playback should not be possible after the delete.
    """
    recording = None
    web_service_obj = None
    try:
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time(constants.SECONDS, TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * rec_duration), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        response = rio.find_recording(recording_id).json()
        LOGGER.debug("Response=%s", response)
        start_time = utils.get_parsed_time(response[0][RecordingAttribute.START_TIME][:-1])
        current_time = datetime.datetime.utcnow()

        # Wait until the recording start time
        if current_time < start_time:
            utils.wait(start_time - current_time, constants.TIME_DELTA)
        is_valid, error = validate_recordings.validate_notification(web_service_obj, constants.RecordingStatus.STARTED)
        assert is_valid, error

        # Let the recording run briefly, then mark it for deletion
        time.sleep(15 * constants.SECONDS)
        delete_time = utils.get_formatted_time(constants.SECONDS * 0, TimeFormat.TIME_FORMAT_MS, stream)
        response = a8.delete_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording_deletion(recording_id)
        assert is_valid, error

        # Verify the segments are gone from active storage and playback is no longer possible
        response = rio.find_recording(recording_id).json()
        is_valid, error = validate_storage.validate_recording_in_storage(response, Cos.ACTIVE_STORAGE, Cos.RECORDING_NOT_STORED)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_playback_using_vle(recording_id, EndTime=delete_time)
        assert not is_valid, ValidationError.DELETED_RECORDING_PLAYED_BACK.format(recording_id)
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)

def test_dash_wv_pid_based_audio_filtering_ipdvrtests_143(channel):
    """
    JIRA_URL: https://jira01.engit.synamedia.com/browse/IPDVRTESTS-143
    DESCRIPTION: Filter an audio PID in the DASH template and play back with encryption
    """
    package_format = ''
    key_profile_ref = ''
    template_name = None
    published_template_data = None
    generic_conf = utils.get_spec_config()
    web_service_obj = None
    recording = None
    metadata = None
    try:
        audio_pids = utils.get_audio_pids(channel, V2PC_EXIST)
        LOGGER.info("Available Audio PIDs : {0}".format(audio_pids))
        assert len(audio_pids.keys()) >= 2, ValidationError.NO_AUDIO_PID

        # Pick one audio PID to filter out
        filter_pids = audio_pids.items()[-1]
        LOGGER.info("Filtered PID : {0}".format(filter_pids))
        filter_pid_payload = {
            'action': 'disable',
            'pid': str(filter_pids[0]),
            'codec': 'DD/' + str(filter_pids[1]).upper(),
            'type': 'audio'
        }
        LOGGER.info("Filtering PID %s from the available audio PIDs", str(filter_pids[0]))

        # Locate the DASH Widevine publish template
        templates_list_resp = v2pc_api.get_all_v2pc_templates()
        assert templates_list_resp.status_code == requests.codes.ok, \
            ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
                templates_list_resp.status_code, templates_list_resp.reason, templates_list_resp.url)
        templt_list = json.loads(templates_list_resp.content)
        for templt in templt_list:
            if templt.get('properties'):
                key_profile_ref = templt['properties'].get('keyProfileRef', '').split('.')[-1]
                package_format = templt['properties'].get('packageFormat', "")
                if (key_profile_ref == V2pc.DASH_TEMPLATE_KEY_PROFILE) and (package_format == V2pc.DASH_TEMPLATE_PACKAGE_FORMAT):
                    template_name = templt['name']
                    published_template_data = templt
                    break
        assert key_profile_ref and package_format, ValidationError.DASH_WV_TEMPLATE_UNAVAILABLE
        LOGGER.info("Published Template Data {0}".format(published_template_data))

        keys_to_remove = ["externalId", "modified", "sysMeta", "transactionId", "type"]
        metadata = dict([(k, v) for k, v in published_template_data.items() if k not in keys_to_remove])
        LOGGER.info("Modified metadata : {0}".format(metadata))
        metadata_modified = metadata.copy()
        stream_config = metadata_modified['properties']['streamConfiguration']
        metadata_modified['properties']['streamConfiguration'] = []
        metadata_modified['properties']['streamConfiguration'].append(filter_pid_payload)

        # Update the publish template with the PID filter
        LOGGER.info("Payload to publish template : {0}".format(metadata_modified))
        update_template = v2pc_helper.put_publish_template(metadata_modified, template=template_name)
        assert update_template, "Unable to update the publish template with the PID filter"

        # Restart the workflow so the template change takes effect
        result, response = v2pc_helper.restart_media_workflow(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc_helper.waits_till_workflow_active(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        # Step 1: Create a recording for 30 minutes
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, channel)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, channel)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=channel)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Step 2: Playback using the DASH publish template configured to filter on audio PIDs
        LOGGER.info("Playback Recording with DASH Widevine")
        is_valid, error = validate_recordings.validate_playback(recording_id, playback_types=[PlaybackTypes.DASH_WV, ])
        assert is_valid, error

        # Step 3: Verify the manifest output matches the filtering configured in the publish template
        filtered_pids2 = []
        result = True
        playback_url = utils.get_mpe_playback_url(recording_id, playback_type=PlaybackTypes.DASH_WV)
        resp = requests.get(playback_url)
        xml_val = utils.xml_dict(resp.content)
        LOGGER.info("DASH WV MPD Manifest details : {0}".format(xml_val))
        if xml_val["MPD"]["Period"]:
            for period in xml_val["MPD"]["Period"]:
                for adt_set in period["AdaptationSet"]:
                    if adt_set.get('contentType') == Feed.AUDIO:
                        for rep in adt_set["Representation"]:
                            LOGGER.info("Representation : {0}".format(rep))
                            pids_picked = re.findall(re.compile(r'audio_\d*'), rep["id"])[-1].replace("audio_", '')
                            if pids_picked:
                                filtered_pids2.append(pids_picked)
        LOGGER.info("filtered_pids2 : {0}".format(filtered_pids2))
        if filtered_pids2 and (len(filtered_pids2) < len(audio_pids)) and (str(filter_pids[0]) not in filtered_pids2):
            message = "Audio PIDs filtered successfully"
        else:
            message = "Filtering did not happen properly"
            result = False
        assert result, message
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            a8.delete_recording(recording)
            LOGGER.info("Recording details destroyed")
        if metadata:
            # Revert the publish template and restart the workflow
            update_template = v2pc_helper.put_publish_template(metadata, template=template_name)
            assert update_template, "Unable to revert the publish template"
            result, response = v2pc_helper.restart_media_workflow(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
            assert result, response

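# Illustration (hypothetical sample id, not part of the test run): how the audio PID is
# extracted from a DASH Representation id in the manifest check above. The id layout
# "<prefix>_audio_<pid>_<lang>" is an assumption about the MPE manifest; the regex keeps
# the trailing "audio_<pid>" token and strips the prefix.
_sample_representation_id = "stream1_audio_482_eng"
_sample_pid = re.findall(re.compile(r'audio_\d+'), _sample_representation_id)[-1].replace("audio_", "")
assert _sample_pid == "482"
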
def test_HLS_Verify_codec_based_audio_filtering_V2PC_ipdvrtests_46(channel):
    """
    JIRA ID : IPDVRTESTS-46
    TITLE : "HLS : Verify codec based audio filtering"
    STEPS : Create a 30 min recording with a future start time.
            Playback using the HLS publish template configured to filter based on audio codecs.
            Verify the manifest output matches the filtering configured in the publish template.
    """
    stream = channel
    web_service_obj = None
    recording = None
    Actual_publish_template = None
    try:
        audio_codecs = utils.get_audio_codecs(stream, V2PC_EXIST)
        assert audio_codecs, "No manifest response from the given mpd url"
        LOGGER.info("audio_codecs : %s", audio_codecs)
        assert len(set(audio_codecs.values())) >= 2, "Only one audio codec format is available in the selected stream"

        # Pick one codec to filter out of the available codecs
        filtered_codec = audio_codecs.items()[-1][1]
        LOGGER.info("filtered_codec : %s", filtered_codec)
        Codec_disabled_payload = []
        for pid, codec in audio_codecs.items():
            if codec == filtered_codec:
                Codec_disabled_payload.append((pid, codec))
        LOGGER.info("Codec_disabled_payload : %s", Codec_disabled_payload)

        # Get the default payload from the publish template
        Actual_publish_template = v2pc_helper.get_publish_template()
        payload = Actual_publish_template.json()
        LOGGER.info("Default Payload in HLS publish template : %s", payload)
        codec_default_values = [x['codec'] for x in payload['properties']['streamConfiguration']]
        codec_state = [x['action'] for x in payload['properties']['streamConfiguration']]
        LOGGER.info("Default Codec values in HLS publish template : %s", codec_default_values)
        LOGGER.info("Codec States in HLS publish template : %s", codec_state)

        # Create a new payload that disables the filtered codec
        payload['properties']['streamConfiguration'] = []
        for disabled_pid, disabled_codec in Codec_disabled_payload:
            payload['properties']['streamConfiguration'].append({
                'action': 'disable',
                'pid': str(disabled_pid),
                'codec': str(disabled_codec).upper(),
                'type': 'audio',
                'default': 'false'})
        LOGGER.info("Updated Payload in HLS publish template (after codec filtering) : %s", payload)
        codec_updated_values = [x['codec'] for x in payload['properties']['streamConfiguration']]
        codec_state = [x['action'] for x in payload['properties']['streamConfiguration']]
        LOGGER.info("Filtered Codec values in HLS publish template : %s", codec_updated_values)
        LOGGER.info("Codec States in HLS publish template : %s", codec_state)

        # Update the HLS publish template with the filtered-codec payload
        Updated_publish_template = v2pc_helper.put_publish_template(payload)
        if Updated_publish_template:
            LOGGER.info("Payload updated")
        else:
            LOGGER.info("Payload update failed")

        # Restart the workflow
        LOGGER.info("Restarting the workflow...")
        result, response = v2pc_helper.restart_media_workflow(gen_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc_helper.waits_till_workflow_active(gen_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        # Create a recording with the codec filtering in place
        LOGGER.info("Creating recording with codec filtering")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        LOGGER.info("Validate Recording for %s", recording_id)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Find the recording in RIO
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio : %s", response[0]['Status'])

        # Playback and validate
        LOGGER.info("Playback Recording ID : %s", recording_id)
        is_valid, error = validate_recordings.validate_playback(recording_id)
        LOGGER.info("Validate recording : %s", is_valid)
        assert is_valid, error

        # Validate the filtered codec is absent from the m3u8 manifest
        playback_url = utils.get_mpe_playback_url(recording_id)
        LOGGER.info("Playback URL : %s", playback_url)
        filtered_codecs_validation = []
        result = True
        codec_check = m3u8.load(playback_url)
        codec_check_data = codec_check.data
        LOGGER.info("m3u8 playback output:")
        LOGGER.info("codec_check_data : %s", codec_check_data)
        if isinstance(codec_check_data, dict) and ('media' in codec_check_data):
            filtered_codecs_validation = [media_data['codecs'] for media_data in codec_check_data['media']
                                          if media_data['type'] == "AUDIO"]
        if filtered_codec not in filtered_codecs_validation:
            message = "Codecs filtered successfully"
        else:
            message = "Filtering did not happen properly"
            result = False
        assert result, message
        LOGGER.info("Testcase passed with the message : %s", message)
    finally:
        if Actual_publish_template:
            # Revert the publish template to its default payload
            LOGGER.info("Reverting default payload : %s", Actual_publish_template.json())
            Updated_publish_template = v2pc_helper.put_publish_template(Actual_publish_template.json())
            if Updated_publish_template:
                LOGGER.info("Payload reverted")
            else:
                LOGGER.info("Payload reverting failed")
            # Restart the workflow
            LOGGER.info("Restarting the workflow")
            v2pc_helper.restart_media_workflow(gen_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)

def test_HLS_Verify_codec_based_audio_filtering_STANDALONE_ipdvrtests_46(channel):
    """
    JIRA ID : IPDVRTESTS-46
    TITLE : "HLS : Verify codec based audio filtering" (standalone VMR deployment)
    """
    stream = channel
    web_service_obj = None
    recording = None
    service_name = "playout-packager"
    try:
        is_valid, config_file = vmr.fetch_config_file(service_name, mpe_config[Component.NAMESPACE])
        assert is_valid, config_file
        v2pc_config_path = os.path.dirname(config_file)
        config_file_name = os.path.basename(config_file)
        update_path = os.path.join(v2pc_config_path, VMR_SERVICES_RESTART.UPDATE_PATH)
        updated_config = os.path.join(update_path, config_file_name)

        audio_codecs = utils.get_audio_codecs(stream, V2PC_EXIST)
        assert audio_codecs, "No manifest response from the given mpd url"
        LOGGER.info("Audio codecs available in the selected stream : %s", audio_codecs)
        assert len(set(audio_codecs.values())) >= 2, "Only one audio codec format is available in the selected stream"

        # Pick one codec to filter out of the available codecs
        filtered_codec = audio_codecs.items()[0][1]
        LOGGER.info("filtered_codec : %s", filtered_codec)
        Codec_disabled_payload = []
        for pid, codec in audio_codecs.items():
            if codec == filtered_codec:
                Codec_disabled_payload.append((pid, codec))
        LOGGER.info("Codec_disabled_payload : %s", Codec_disabled_payload)
        push_payload = []
        for disabled_pid, disabled_codec in Codec_disabled_payload:
            push_payload.append({
                'action': 'disable',
                'pid': str(disabled_pid),
                'codec': str(disabled_codec).upper(),
                'type': 'audio',
                'default': 'false'
            })

        # Update the HLS publish template inside the packager config map
        with open(os.path.join(v2pc_config_path, config_file_name), "r") as fp:
            dt = json.load(fp)
        workflows = json.loads(dt['data']['workflows.conf'])
        template = workflows.keys()[0]
        publish_templates = workflows[template]['assetResolver']['workflow'][0]['publishTemplates']
        for templates in publish_templates:
            if templates['name'] == 'HLS':
                if 'streamConfiguration' in templates:
                    LOGGER.info("Default Payload in HLS publish template : %s", templates['streamConfiguration'])
                # Replace the stream configuration with the codec filter
                templates['streamConfiguration'] = []
                templates['streamConfiguration'].extend(push_payload)
                LOGGER.info("Updated Payload after codec filtering : %s", templates['streamConfiguration'])
        workflows[template]['assetResolver']['workflow'][0]['publishTemplates'] = publish_templates
        dt['data']['workflows.conf'] = json.dumps(workflows)
        with open(updated_config, 'w') as f:
            json.dump(dt, f, indent=4)

        # Apply the config (oc apply -f) and restart the VMR pods
        redeploy_res, resp = vmr.redeploy_config_map(service_name, mpe_config[Component.NAMESPACE])
        assert redeploy_res, resp
        delete_pods, resp = vmr.delete_vmr_pods("All", mpe_config[Component.NAMESPACE])
        assert delete_pods, resp

        # Create recording
        LOGGER.info("Creating Recording")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        playback_url = utils.get_mpe_playback_url(recording_id, "hls")
        LOGGER.info("playback_url : %s", playback_url)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Find the recording in RIO
        LOGGER.info("Find recording in rio")
        response = rio.find_recording(recording_id).json()
        assert response, ValidationError.RECORDING_RESPONSE_EMPTY.format(recording_id)
        LOGGER.info("Recording status in rio : %s", response[0]['Status'])

        # Playback and validate
        LOGGER.info("Playback Recording for the recording ID : %s", recording_id)
        is_valid, error = validate_recordings.validate_playback(recording_id)
        LOGGER.info("Validate recording : %s", is_valid)
        assert is_valid, error

        # Validate the filtered codec is absent from the m3u8 manifest
        playback_url = utils.get_mpe_playback_url(recording_id)
        LOGGER.info("Playback URL : %s", playback_url)
        filtered_codecs_validation = []
        result = True
        codec_check = m3u8.load(playback_url)
        codec_check_data = codec_check.data
        LOGGER.info("m3u8 playback output :")
        LOGGER.info("codec_check_data : %s", codec_check_data)
        if isinstance(codec_check_data, dict) and ('media' in codec_check_data):
            filtered_codecs_validation = [media_data['codecs'] for media_data in codec_check_data['media']
                                          if media_data['type'] == "AUDIO"]
        if filtered_codec not in filtered_codecs_validation:
            message = "Codecs filtered successfully"
        else:
            message = "Filtering did not happen properly"
            result = False
        assert result, message
        LOGGER.info("Testcase passed with the message : %s", message)
    finally:
        LOGGER.info("Reverting default payload...")
        vmr.redeploy_config_map(service_name, mpe_config[Component.NAMESPACE], revert=True)
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)

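# Illustration (not part of the test run): in the standalone deployment the publish
# templates live inside a config map where "workflows.conf" is itself a JSON string, so
# it has to be parsed and re-serialized separately, as done in the test above. The shape
# and values below are a minimal assumed example, not the real config map.
_sample_configmap = {
    "data": {
        "workflows.conf": json.dumps(
            {"wf1": {"assetResolver": {"workflow": [{"publishTemplates": [{"name": "HLS"}]}]}}})
    }
}
_sample_workflows = json.loads(_sample_configmap["data"]["workflows.conf"])
_sample_workflows["wf1"]["assetResolver"]["workflow"][0]["publishTemplates"][0]["streamConfiguration"] = [
    {"action": "disable", "pid": "482", "codec": "EC-3", "type": "audio", "default": "false"}]
_sample_configmap["data"]["workflows.conf"] = json.dumps(_sample_workflows)
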
def test_modify_channel_ipdvrtests_57(stream):
    """
    JIRA_URL: https://jira01.engit.synamedia.com/browse/IPDVRTESTS-57
    DESCRIPTION: Modify an existing channel in the channel lineup, record on the updated
                 channel and validate playback
    """
    recording = None
    web_service_obj = None
    active_chl_lineup = None
    try:
        generic_config = utils.get_spec_config()
        workflow = generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME]
        response = v2pc_api.get_workflow(workflow)
        workflow_dict = json.loads(response.content)
        LOGGER.debug("Workflow dict : %s", workflow_dict)
        assert workflow_dict.get("properties") and workflow_dict["properties"].get("mediaServiceEndpoints"), \
            Feed.CHANNEL_DETAILS_UNAVAILABLE

        # Find the active channel lineup from the workflow's media source endpoint
        for media_service in workflow_dict["properties"]["mediaServiceEndpoints"]:
            if media_service.get("endpointType") == "MEDIASOURCE" and media_service.get("properties") \
                    and "assetLineupRef" in media_service["properties"]:
                active_chl_lineup = media_service["properties"]["assetLineupRef"].split(".")[-1]
                break
        assert active_chl_lineup, Feed.CHANNEL_DETAILS_UNAVAILABLE
        LOGGER.debug("Active channel lineup : %s", active_chl_lineup)
        response = v2pc_api.get_channel_line_up(active_chl_lineup)
        LOGGER.debug("Response : %s", response)
        response.raise_for_status()
        LOGGER.debug("Response content : %s", response.content)
        avail_chanl_lineup_dict = json.loads(response.content)
        start_stamp = datetime.datetime.utcnow().strftime(TimeFormat.TIME_FORMAT_LOG)

        # Step 1: Remove the existing channel from the lineup
        LOGGER.info("Step 1: Remove existing channel from line up")
        response = v2pc_api.remove_channel_from_channel_line_up(stream, avail_chanl_lineup_dict)
        LOGGER.debug("Response : %s", response)
        assert response, ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
            response.status_code, response.reason, response.url)
        LOGGER.debug("Response content : %s", response.content)
        original_lineup = json.loads(response.content)

        LOGGER.info("Step 1.1: Validate MCE for core dump")
        is_valid, msg = utils.core_dump("mce")
        assert is_valid, msg

        LOGGER.info("Step 2: Verify channel is no longer being captured")
        is_avail = v2pc.is_channel_available(stream)
        assert not is_avail, ValidationError.CHANNEL_EXIST.format(stream)

        # Get the channel details to update
        original_channel_details = v2pc_api.get_channel_source(stream)
        LOGGER.debug("Channel Info : %s", original_channel_details.content)

        LOGGER.info("Step 2.1: Add the channel back to the channel line up")
        update_lineup = v2pc_api.add_channel_to_channel_line_up(stream, avail_chanl_lineup_dict)
        assert update_lineup, ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
            update_lineup.status_code, update_lineup.reason, update_lineup.url)

        LOGGER.info("Step 3: Verify channel moves back to capture state")
        is_valid, error = v2pc.waits_till_channel_capture(stream)
        assert is_valid, error
        result, response = v2pc.restart_media_workflow(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc.waits_till_workflow_active(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        end_stamp = datetime.datetime.utcnow().strftime(TimeFormat.TIME_FORMAT_LOG)

        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.SHORT_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        LOGGER.debug("Stream Id : %s", stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, copyType=copy_type, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        LOGGER.info("Recording Id : %s", recording_id)

        LOGGER.info("Step 3.1: Verify there are no errors in the MCE logs")
        is_valid, error = v2pc.collect_error_log(start_stamp, end_stamp, TestLog.MCE_ERROR_LOG_PATH,
                                                 recording_id, component=Component.MCE, is_error=True)
        assert is_valid, error
        LOGGER.info("Step 3.2: Validate MCE for core dump")
        is_valid, msg = utils.core_dump("mce")
        assert is_valid, msg

        # Create a notification handler
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created : %s", recording.serialize())

        LOGGER.info("Step 4: Create recording on the updated channel and verify it completes")
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
        if active_chl_lineup:
            # Make sure the channel is back in the lineup even if the test failed midway
            response = v2pc_api.get_channel_line_up(active_chl_lineup)
            assert response, 'Unable to fetch channel line up detail'
            avail_chanl_lineup_dict = json.loads(response.content)
            update_lineup = v2pc_api.add_channel_to_channel_line_up(stream, avail_chanl_lineup_dict)
            assert update_lineup, 'Unable to revert the channel line up'

def test_schedule_recording_ipdvrtests_47(stream):
    """
    JIRA_URL : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-47
    DESCRIPTION : Schedule a recording with a future start time
    """
    web_service_obj = None
    recording = None
    try:
        # STEP 1: Create recording
        LOGGER.info("Creating Recording")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.SHORT_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error
        response = rio.find_recording(recording_id)
        resp = json.loads(response.content)

        # STEP 2: Check the memsql table for the recording start time using the RIO API
        LOGGER.info("Validate recording start time")
        is_valid, error = validate_start_time(start_time, resp, stream)
        assert is_valid, error

        # STEP 3: Check the COS logs to see that segments are written, using the COS API
        LOGGER.info("Check segments in cos storage")
        is_valid, error = validate_storage.validate_recording_in_storage(resp, Cos.ACTIVE_STORAGE, Cos.RECORDING_STORED)
        assert is_valid, error

        # STEP 4: Check the MA/SR pod logs for any errors during the recording
        LOGGER.info("Check MA/SR pod logs for any errors while recording")
        s_time = utils.get_parsed_time(str(start_time)[:-1])
        e_time = utils.get_parsed_time(str(end_time)[:-1])
        is_valid, error = vmr_helper.verify_error_logs_in_vmr(stream, 'manifest-agent', 'vmr', search_string="ERROR",
                                                              start_time=s_time, end_time=e_time)
        assert is_valid, error
        is_valid, error = vmr_helper.verify_error_logs_in_vmr(stream, 'segment-recorder', 'vmr', search_string="ERROR",
                                                              start_time=s_time, end_time=e_time)
        assert is_valid, error

        # STEP 5: Check there are no discontinuity errors in the manifest agent
        LOGGER.info("Check no discontinuity in MA")
        is_valid, error = vmr_helper.verify_error_logs_in_vmr(stream, "manifest-agent", "vmr", search_string="discontinuity",
                                                              start_time=s_time, end_time=e_time)
        assert is_valid, error

        # STEP 6: Check the recording completes without any core dumps on MCE/ABRGW
        LOGGER.info("Check core dumps")
        is_valid, msg = utils.core_dump("mce")
        assert is_valid, msg
    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)

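# Hedged sketch (assumption): validate_start_time() is called by
# test_schedule_recording_ipdvrtests_47 above but is not defined in this snippet. The
# helper below is a hypothetical illustration of what it might do, assuming the RIO
# response carries the scheduled start time under RecordingAttribute.START_TIME in the
# same "...Z"-suffixed format used elsewhere in these tests. It is deliberately given a
# different name so it does not shadow the real helper.
def _validate_start_time_sketch(expected_start_time, rio_response, stream):
    """Compare the recording start time stored in memsql/RIO with the scheduled one."""
    if not rio_response:
        return False, "Empty RIO response for stream %s" % stream
    recorded_start = utils.get_parsed_time(rio_response[0][RecordingAttribute.START_TIME][:-1])
    scheduled_start = utils.get_parsed_time(str(expected_start_time)[:-1])
    if recorded_start != scheduled_start:
        return False, "Start time mismatch for %s: scheduled %s, found %s" % (stream, scheduled_start, recorded_start)
    return True, None
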