Example #1
def test_filter_video_based_on_rank_ipdvrtests_235(channel):
    """
    JIRA ID : IPDVRTESTS-235
    JIRA Link : https://jira01.engit.synamedia.com/browse/IPDVRTESTS-235
    Description: DASH-Confirm MPE publish template to filter video based on rank
    """
    stream = channel
    recording = None
    metadata = None
    web_service_obj = None
    try:
        source_xml = utils.get_source_mpd_content(stream)
        bit_res = utils.get_bitrates_resolutions(source_xml)
        bitrates = bit_res.keys()
        assert len(bitrates) >= 2, "Not enough video profiles in the selected stream to filter"

        # Step 1: Configure variants in the MPE publish template to filter video based on rank
        push_payload = {
                        "name": "default",
                        "order": "rank",
                        "selectivePublish": "true",
                        "profileOrdering": [
                             {
                                "rank": "1"
                             },
                             {
                                "rank": "2"
                             }
                         ]
                        }
        generic_config = utils.get_spec_config()
        key_profile = generic_config[Component.WORK_FLOW][Component.KEY_PROFILE_DASH_WIDEVINE]
        response, published_template_data, template_name = v2pc_helper.get_publish_template_by_format(template_format=V2pc.DASH_TEMPLATE_PACKAGE_FORMAT, key_profile=key_profile)
        assert response, "Cannot find the template data for the given format/key profile"
        LOGGER.debug("Published Template Data : %s", published_template_data)
        keys_to_be_removed = V2pc.OTHER_KEYS
        metadata = {key: value for key, value in published_template_data.items() if key not in keys_to_be_removed}
        LOGGER.debug("Modified metadata : %s", metadata)
        metadata_modified = metadata.copy()
        metadata_modified['properties']["variants"] = [push_payload, ]
        LOGGER.debug("modified publish template : %s", metadata_modified)
        update_template = v2pc.put_publish_template(metadata_modified, template=template_name)
        assert update_template, "Unable to update the published template with renamed segment"

        result, response = v2pc.restart_media_workflow(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        result, response = v2pc_helper.waits_till_workflow_active(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME], 120)
        assert result, response

        # Step 2: Create a 30-minute recording
        LOGGER.info("Creating Recording")
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, stream)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, stream)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error
        playback_url = utils.get_mpe_playback_url(recording_id, PlaybackTypes.DASH_WV)
        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Step 3: Fetch the playback manifest and verify the published variants
        mpd_res = requests.get(playback_url)
        res_bitrates = []
        result_dict = utils.xml_dict(mpd_res.content)
        for repre in result_dict['MPD']['Period'][0]['AdaptationSet']:
            if "Representation" in repre and repre.get("contentType") == 'video':
                for rate in repre["Representation"]:
                    if "video" in rate['id'] and int(rate['bandwidth']) not in res_bitrates:
                        res_bitrates.append(int(rate['bandwidth']))

        # Keep only the top-ranked profiles: a negative strip_value trims the
        # lowest bitrates; zero means every source profile was ranked.
        strip_value = len(push_payload["profileOrdering"]) - len(bitrates)
        expected_bitrates = sorted(bitrates)[::-1][0:strip_value] if strip_value != 0 else sorted(bitrates)[::-1]
        assert expected_bitrates == res_bitrates, "Video bitrates are not filtered based on rank given in publish template"
    finally:
        if metadata:
            LOGGER.info("Reverting the publish template changes")
            update_template = v2pc.put_publish_template(metadata, template=template_name)
            assert update_template, "Unable to update the publish template"

            result, response = v2pc.restart_media_workflow(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME])

            assert result, response
            result, response = v2pc_helper.waits_till_workflow_active(generic_config[Component.WORK_FLOW][Component.WORKFLOW_NAME], 120)
            assert result, response

        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)
Example #2
def test_rtc10064_health_check_dp_components():
    config_info = get_configInfo()
    vmr_pods = get_pod_details(constants.Component.VMR)
    running_pods = list()
    unavailable_pods = list()
    mandatory_pods = [
        'a8-updater', 'archive-agent', 'api', 'dash-origin', 'health-agent',
        'health-monitor', 'manifest-agent', 'nsa', 'reconstitution-agent',
        'recorder-manager', 'segment-recorder', 'ui', 'zookeeper', 'bmw',
        'sats-server'
    ]
    for i in vmr_pods.items:
        pod_name = i.metadata.name
        running_pods.append(pod_name)
        LOGGER.debug("%s\t%s\t%s" %
                     (i.status.pod_ip, pod_name, i.status.phase))
        if i.status.phase not in ["Succeeded", "Running"]:
            raise Exception(
                "Testcase failed: VMR pod %s is in %s state." % (pod_name, i.status.phase)
            )

    for pod in mandatory_pods:
        verification_data = ','.join(rp for rp in running_pods if pod in rp)
        if len(verification_data) == 0:
            msg = '%s pod is unavailable' % pod
            LOGGER.error(msg)
            unavailable_pods.append(pod)

    if len(unavailable_pods) > 0:
        raise Exception('Following pods are unavailable: %s' %
                        ','.join(unavailable_pods))

    # Check the status of MCE pods:
    mce_confg = get_spec_config(Component.MCE)
    mce_username = mce_confg[Component.MCE_USERNAME]
    if is_standalone(Component.MCE):
        mce_ip = mce_confg['node1']['ip']
        mce_key = mce_confg['sshkey']
        ssh_client = get_custom_ssh_client(ip=mce_ip,
                                           username=mce_username,
                                           key_path=mce_key)
        stdin, stdout, stderr = ssh_client.exec_command(
            "/sbin/service mce-app status", get_pty=False)
        data = stdout.readlines()
        ssh_client.close()
        for line in data:
            LOGGER.debug(line)
            each_service = line.split()
            if len(each_service) > 2:
                if each_service[0] == 'asperacentral':
                    continue
                assert 'RUNNING' == str(
                    each_service[1]), 'ERROR: %s service is in %s state' % (
                        each_service[0], each_service[1])
        LOGGER.debug('MCE IP: ' + str(mce_ip))
        LOGGER.debug('MCE is up and running...')
        LOGGER.debug("EXIT CODE: " + str(stdout.channel.recv_exit_status()))

    if is_v2pc(Component.MCE):
        mce_ip = mce_confg[Component.VIRTUAL_IP]
        ssh_client = get_ssh_client(Component.MCE, mce_username)
        stdin, stdout, stderr = ssh_client.exec_command(
            "/sbin/service mce-app status", get_pty=False)
        data = stdout.readlines()
        ssh_client.close()
        for line in data:
            LOGGER.debug(line)
            each_service = line.split()
            if len(each_service) > 2:
                if each_service[0] == 'asperacentral':
                    continue
                assert 'RUNNING' == str(
                    each_service[1]), 'ERROR: %s service is in %s state' % (
                        each_service[0], each_service[1])
        LOGGER.debug('MCE IP: ' + str(mce_ip))
        LOGGER.debug('MCE is up and running...')
        LOGGER.debug("EXIT CODE: " + str(stdout.channel.recv_exit_status()))

    # Check the status of MPE pods:
    mpe_confg = get_spec_config(Component.MPE)
    if is_standalone(Component.MPE):
        # namespace
        mpe_pods = get_pod_details(mpe_confg[Component.NAMESPACE])
        running_pods = list()
        unavailable_pods = list()
        mandatory_pods = [
            'mpe',
        ]
        for i in mpe_pods.items:
            pod_name = i.metadata.name
            running_pods.append(pod_name)
            LOGGER.debug("%s\t%s\t%s" %
                         (i.status.pod_ip, pod_name, i.status.phase))
            if i.status.phase not in ["Succeeded", "Running"]:
                raise Exception(
                    "Testcase failed: MPE pod %s is in %s state." % (pod_name, i.status.phase)
                )

        for pod in mandatory_pods:
            verification_data = ','.join(rp for rp in running_pods
                                         if pod in rp)
            if len(verification_data) == 0:
                msg = '%s pod is unavailable' % pod
                LOGGER.error(msg)
                unavailable_pods.append(pod)

        if len(unavailable_pods) > 0:
            raise Exception('Following pods are unavailable: %s' %
                            ','.join(unavailable_pods))

    if is_v2pc(Component.MPE):
        mpe_ip = mpe_confg['node1'][Component.IP]
        mpe_username = mpe_confg[Component.MPE_USERNAME]
        ssh_client = get_custom_ssh_client(
            mpe_ip,
            username=mpe_username,
            key_path=mpe_confg[Component.SSH_KEY])
        stdin, stdout, stderr = ssh_client.exec_command(
            "systemctl status mpe.service", get_pty=False)
        data = stdout.read()
        ssh_client.close()
        LOGGER.debug(str(data))
        if 'active' in str(data) and 'running' in str(data):
            LOGGER.debug('MPE service is up and running...')
        else:
            raise Exception('ERROR: MPE service is not up and running...')
        LOGGER.debug('MPE IP:' + str(mpe_ip))
        LOGGER.debug("EXIT CODE: " + str(stdout.channel.recv_exit_status()))

    # Check COS - CLM and Cassandra service:
    cos_confg = get_spec_config(Component.COS)
    cos_ip = cos_confg['node1']['ip']
    cos_username = cos_confg['user']
    cos_password = cos_confg['pass']
    ssh_client = get_custom_ssh_client(ip=cos_ip,
                                       username=cos_username,
                                       password=cos_password)
    stdin, stdout, stderr = ssh_client.exec_command("/sbin/service clm status",
                                                    get_pty=True)
    data = stdout.read()
    ssh_client.close()
    LOGGER.debug(str(data))
    if 'passing' in str(data):
        LOGGER.debug('COS: CLM service is running...')
    else:
        raise Exception('ERROR: COS: CLM service is not up and running...')
    LOGGER.debug('COS IP:' + str(cos_ip))
    LOGGER.debug("EXIT CODE: " + str(stdout.channel.recv_exit_status()))

    # Check CMC service:
    cmc_confg = get_spec_config(Component.CMC)
    cmc_ip = cmc_confg['ip']
    cmc_username = cmc_confg['user']
    cmc_password = cmc_confg['pass']
    ssh_client = get_custom_ssh_client(ip=cmc_ip,
                                       username=cmc_username,
                                       password=cmc_password)
    stdin, stdout, stderr = ssh_client.exec_command(
        "/sbin/service cmc_aicc status", get_pty=True)
    data = stdout.read()
    ssh_client.close()
    LOGGER.debug(str(data))
    if 'cmc_aicc is running' in str(data):
        LOGGER.debug('CMC_AICC service is up and running...')
    else:
        raise Exception('ERROR: CMC_AICC service is not up and running...')
    LOGGER.debug('CMC IP: ' + str(cmc_ip))
    LOGGER.debug("EXIT CODE: " + str(stdout.channel.recv_exit_status()))

    # Check MEMSQL service:
    memsql_ip = config_info[Component.MEMSQL]['ip']
    memsql_username = config_info[Component.MEMSQL]['user']
    memsql_password = config_info[Component.MEMSQL]['pass']
    ssh_client = get_custom_ssh_client(ip=memsql_ip,
                                       username=memsql_username,
                                       password=memsql_password)
    stdin, stdout, stderr = ssh_client.exec_command("memsql-ops status",
                                                    get_pty=True)
    data = stdout.read()
    ssh_client.close()
    if 'MemSQL Ops is running' in str(data):
        LOGGER.debug('MEMSQL-OPS is up and running...')
    else:
        raise Exception('ERROR: MEMSQL-OPS is not up and running...')
    LOGGER.debug(str(data))
    LOGGER.debug('MEMSQL IP: ' + str(memsql_ip))
    LOGGER.debug("EXIT CODE: " + str(stdout.channel.recv_exit_status()))
Example #3
import logging

import pytest

from helpers import utils  # assumed import path, matching the helpers imports below
from helpers.constants import TestLog
from helpers.constants import TimeFormat
from helpers.constants import ValidationError
from helpers.constants import RecordingAttribute
from helpers.constants import V2pc
from helpers.constants import Component
from helpers.utils import cleanup
from helpers.vmr.vmr_helper import v2pc_edit_manifest_config
from helpers.vmr.vmr_helper import redeploy_config_map
from helpers.vmr.vmr_helper import verify_batch_size_update

pytestmark = pytest.mark.recording
LOGGER = logging.getLogger(TestLog.TEST_LOGGER)

# CONFIG_INFO = yaml.load(pytest.config.getoption(constants.GLOBAL_CONFIG_INFO))
generic_conf = utils.get_spec_config()
private_copy_stream = generic_conf[Component.PRIVATE_COPY_STREAM][Component.STREAM_1][Component.ID] if generic_conf.get(
    Component.PRIVATE_COPY_STREAM) else None

TC_ER_018_DATA = [("bulk_recordings_common", RecordingAttribute.COPY_TYPE_COMMON),
                  ("bulk_recordings_unique", RecordingAttribute.COPY_TYPE_UNIQUE)]


@utils.test_case_logger
@pytest.mark.parametrize("name, copy_type", TC_ER_018_DATA, ids=[x[0] for x in TC_ER_018_DATA])
@pytest.mark.skipif(not generic_conf.get(Component.PRIVATE_COPY_STREAM),
                    reason="Configuration doesn't have a private copy stream")
def test_rtc9726_tc_er_018_private_copy(stream, name, copy_type):
    """
    Schedule UNIQUE copy recording (20 - 10 same start/end time, 10 different start/end times), batch size 4.
    """
Example #4
def test_rtc10602_tc_launch_delete(stream):
    """
    TC10602: test module for the longevity delete recordings test suite
    # delete recording as per the configuration for number of deletion and duration for deletion
    """

    longevity_confg = utils.get_spec_config()
    # no need for this numofaccts usage with state 3 dataplane
    # numofaccts = longevity_confg['systemvalues']['numberofaccounts']
    deletespermin = longevity_confg['recording']['recordingspermin']
    test_dur_in_min = longevity_confg['recording']['testdurationinmins']

    try:

        json_file = "recording_list"
        temp_dir = os.path.join(utils.get_base_dir_path(), TestDirectory.TEMP_DIR)
        rec_list_json_file = os.path.join(temp_dir, json_file + '.json')
        assert os.path.exists(rec_list_json_file), ValidationError.FILE_UNAVAILABLE.format(rec_list_json_file)

        with open(rec_list_json_file) as rec_json_file:
            rec_dict = json.load(rec_json_file)
            print(rec_dict)

        assert rec_dict, ValidationError.FILE_UNAVAILABLE.format(rec_dict)

        # convert yaml parameters to minutes and seconds
        if isinstance(test_dur_in_min, int):
            test_duration_in_seconds = test_dur_in_min * constants.SECONDS_IN_MINUTE

            if isinstance(deletespermin, int):
                # Two scenarios:
                # 1: fewer than 60 deletions per minute -> one deletion per loop,
                #    sleeping more than a second between loops.
                # 2: 60 or more deletions per minute -> several deletions per
                #    one-second loop.
                assert deletespermin > 0, 'deletespermin must be positive: {0}'.format(deletespermin)
                deletions_per_sec_rounded = float(deletespermin) / constants.SECONDS_IN_MINUTE
                sleep_duration = 1
                if 0.0 < deletions_per_sec_rounded < 1.0:
                    deletions_per_sec_rounded = 1
                    sleep_duration = constants.SECONDS_IN_MINUTE / float(deletespermin)
                    total_test_loop_counts = int(test_duration_in_seconds // sleep_duration)
                elif deletions_per_sec_rounded >= 1:
                    deletions_per_sec_rounded = int(round(deletions_per_sec_rounded))
                    sleep_duration = 1
                    total_test_loop_counts = test_duration_in_seconds

                print()
                print("[INFO: ] total_test_loop_counts ", total_test_loop_counts)
                print("[INFO: ] deletions_per_sec_rounded ", deletions_per_sec_rounded)
                print("[INFO: ] sleep duration ", sleep_duration)
                print()

                rec_list = rec_dict.get('rec_ids')
                for i in range(total_test_loop_counts):
                    rec_list, rec_dict = delete_recordings(deletions_per_sec_rounded, rec_list, rec_dict)
                    time.sleep(sleep_duration)
            else:
                assert False, ValidationError.TYPE_MISMATCH.format('deletespermin is not a number: {0}'.format(deletespermin))

        else:
            assert False, ValidationError.TYPE_MISMATCH.format('testdurationinmins is not a number: {0}'.format(test_dur_in_min))

    finally:
        if rec_list_json_file:
            with open(rec_list_json_file, 'w') as rec_json_file:
                json.dump(rec_dict, rec_json_file)
                print(rec_dict)
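
To make the pacing arithmetic concrete, here is a worked example with illustrative config values (30 deletions per minute over a 10-minute test; the real numbers come from the longevity YAML):

SECONDS_IN_MINUTE = 60
deletespermin = 30                                              # illustrative config value
test_dur_in_min = 10                                            # illustrative config value
test_duration_in_seconds = test_dur_in_min * SECONDS_IN_MINUTE  # 600

deletions_per_sec = deletespermin / float(SECONDS_IN_MINUTE)    # 0.5 -> less than one per second
if 0.0 < deletions_per_sec < 1.0:
    sleep_duration = SECONDS_IN_MINUTE / float(deletespermin)   # 2.0s between single deletions
    total_loops = int(test_duration_in_seconds // sleep_duration)  # 300 iterations
    print(total_loops, sleep_duration)                          # 300 2.0 -> 300 deletions in 10 minutes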
Example #5
def test_dash_wv_pid_based_audio_filtering_ipdvrtests_143(channel):
    """
    JIRA_URL: https://jira01.engit.synamedia.com/browse/IPDVRTESTS-143
    DESCRIPTION: Filter an audio PID in the DASH publish template and play back
                 with encryption
    """

    package_format = ''
    key_profile_ref = ''
    template_name = None
    published_template_data = None
    generic_conf = utils.get_spec_config()
    web_service_obj = None
    recording = None
    metadata = None
    try:
        audio_pids = utils.get_audio_pids(channel, V2PC_EXIST)
        LOGGER.info("Available Audio PIDs : {0}".format(audio_pids))
        assert len(audio_pids) >= 2, ValidationError.NO_AUDIO_PID
        filter_pids = list(audio_pids.items())[-1]
        LOGGER.info("filtered pids {0}".format(filter_pids))
        filter_pid_payload = {
            'action': 'disable',
            'pid': str(filter_pids[0]),
            'codec': 'DD/' + str(filter_pids[1]).upper(),
            'type': 'audio'
        }
        LOGGER.info("filtering %s pid from audio pids " % (str(filter_pids[0])))
        LOGGER.info("Audio pids that is available in manifest ")
        templates_list_resp = v2pc_api.get_all_v2pc_templates()
        assert templates_list_resp.status_code == requests.codes.ok, ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
            templates_list_resp.status_code, templates_list_resp.reason, templates_list_resp.url)
        templt_list = json.loads(templates_list_resp.content)
        for templt in templt_list:
            if templt.get('properties'):
                key_profile_ref = templt['properties'].get('keyProfileRef', '').split('.')[-1]
                package_format = templt['properties'].get('packageFormat', "")
            if (key_profile_ref == V2pc.DASH_TEMPLATE_KEY_PROFILE) and (package_format == V2pc.DASH_TEMPLATE_PACKAGE_FORMAT):
                template_name = templt['name']
                published_template_data = templt
                break
        assert key_profile_ref and package_format, ValidationError.DASH_WV_TEMPLATE_UNAVAILABLE

        LOGGER.info("Published Template Data {0}".format(pubished_template_data))
        keys_to_remove = ["externalId", "modified", "sysMeta", "transactionId", "type"]
        metadata = {k: v for k, v in published_template_data.items() if k not in keys_to_remove}
        LOGGER.info("Modified metadata : {0}".format(metadata))
        metadata_modified = metadata.copy()

        metadata_modified['properties']['streamConfiguration'] = [filter_pid_payload]

        # Update the publish template with the PID filter
        LOGGER.info("Payload to publish template : {0}".format(metadata_modified))
        update_template = v2pc_helper.put_publish_template(metadata_modified, template=template_name)
        assert update_template, "Unable to update the publish template with the PID filter"

        # Restart the workflow
        result, response = v2pc_helper.restart_media_workflow(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc_helper.waits_till_workflow_active(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        # Step 1: Create a recording for 30 mins..
        rec_buffer_time = utils.get_rec_duration(dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(dur_confg_key=Component.LARGE_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time((constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS, channel)
        end_time = utils.get_formatted_time((constants.SECONDS * (rec_buffer_time + rec_duration)), TimeFormat.TIME_FORMAT_MS, channel)
        recording = recording_model.Recording(StartTime=start_time, EndTime=end_time, StreamId=channel)
        recording_id = recording.get_entry(0).RecordingId
        web_service_obj = notification_utils.get_web_service_object(recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created=%s", recording.serialize())
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(recording_id, web_service_obj)
        assert is_valid, error

        # Step 2: Playback using DASH publish template configured to filter based on audio PIDs
        LOGGER.info("Playback Recording with Dash Widevine")
        is_valid, error = validate_recordings.validate_playback(recording_id, playback_types=[PlaybackTypes.DASH_WV, ])
        assert is_valid, error

        filtered_pids2 = []
        result = True

        # Step 3: Verify output of manifest curl to match filtering configured in publish template
        playback_url = utils.get_mpe_playback_url(recording_id, playback_type=PlaybackTypes.DASH_WV)
        resp = requests.get(playback_url)
        xml_val = utils.xml_dict(resp.content)
        LOGGER.info("DASH WV MPD Manifest details : {0}".format(xml_val))

        if xml_val["MPD"]["Period"]:
            for period in xml_val["MPD"]["Period"]:
                for adt_set in period["AdaptationSet"]:
                    if adt_set.get('contentType') == Feed.AUDIO:
                        for rep in adt_set["Representation"]:
                            LOGGER.info("representation list {0}".format(rep))
                            matches = re.findall(r'audio_(\d+)', rep["id"])
                            if matches:
                                filtered_pids2.append(matches[-1])

        LOGGER.info("filtered_pids2 : {0}".format(filtered_pids2))
        
        if filtered_pids2 and (len(filtered_pids2) < len(audio_pids)) and (str(filter_pids[0]) not in filtered_pids2):
            message = "audio PIDs filtered successfully"
        else:
            message = "audio PID filtering did not happen as configured"
            result = False

        assert result, message

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            a8.delete_recording(recording)
            LOGGER.info("recording details destroyed.. ")
        if metadata:
            update_template = v2pc_helper.put_publish_template(metadata, template=template_name)
            assert update_template, "Unable to revert the published template"
            result, response = v2pc_helper.restart_media_workflow(generic_conf[Component.WORK_FLOW][Component.WORKFLOW_NAME])
            assert result, response
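
The Representation-id parsing in step 3 relies on the "..._audio_<pid>" naming convention of the manifest ids; a standalone sketch of that extraction (the sample ids are illustrative):

import re

representation_ids = ["rec1_audio_482", "rec1_audio_483"]  # illustrative ids
picked = [re.findall(r'audio_(\d+)', rid)[-1] for rid in representation_ids]
print(picked)  # ['482', '483'] -> PIDs that survived the publish-template filter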
Example #6
from helpers.constants import Component
from helpers.constants import TestLog
from helpers.constants import TimeFormat
from helpers.constants import Archive
from helpers.constants import ValidationError
from helpers.constants import RecordingAttribute
from helpers.constants import VMR_SERVICES_RESTART

LOGGER = logging.getLogger(os.environ.get("PYTEST_XDIST_WORKER", TestLog.TEST_LOGGER))
CONFIG_INFO = utils.get_configInfo()

V2PC_EXIST = (CONFIG_INFO[Component.GENERIC_CONFIG][Component.ENABLE] == "v2pc")
streams_info = utils.get_source_streams(constants.Stream_tags.MULTIPLE_AUDIO_CODEC)
channels = streams_info[Component.STREAMS]

gen_config = utils.get_spec_config()
mpe_config = utils.get_spec_config(Component.MPE)

@utils.test_case_logger
@pytest.mark.parametrize("channel", channels)
@pytest.mark.skipif(V2PC_EXIST is False, reason="V2PC Doesn't Exist")
def test_HLS_Verify_codec_based_audio_filtering_V2PC_ipdvrtests_46(channel):
    """
    JIRA ID : IPDVRTESTS-46
    TITLE   : "HLS : Verify codec based audio filtering"
    STEPS   : Create a 30-minute recording with a future start time.
              Playback using the HLS publish template configured to filter based on audio codecs.
              Verify that the manifest output matches the filtering configured in the publish template
    """
    stream = channel
    web_service_obj = None
Example #7
def test_modify_channel_ipdvrtests_57(stream):
    """
    JIRA_URL: https://jira01.engit.synamedia.com/browse/IPDVRTESTS-57
    DESCRIPTION: Modify an existing channel in the channel lineup, record on
                 the updated channel, and validate playback
    """

    recording = None
    web_service_obj = None
    active_chl_lineup = None
    try:

        generic_confg = utils.get_spec_config()
        workflow = generic_confg[Component.WORK_FLOW][Component.WORKFLOW_NAME]
        response = v2pc_api.get_workflow(workflow)
        workflow_dict = json.loads(response.content)
        LOGGER.debug("[INFO:] workflow_dict %s", workflow_dict)

        assert workflow_dict.get(
            "properties") and workflow_dict["properties"].get(
                "mediaServiceEndpoints"), Feed.CHANNEL_DETAILS_UNAVAILABLE
        for media_service in workflow_dict["properties"][
                "mediaServiceEndpoints"]:
            if media_service.get(
                    "endpointType") == "MEDIASOURCE" and media_service.get(
                        "properties"
                    ) and "assetLineupRef" in media_service["properties"]:
                active_chl_lineup = media_service["properties"][
                    "assetLineupRef"].split(".")[-1]
                break
        assert active_chl_lineup, Feed.CHANNEL_DETAILS_UNAVAILABLE
        LOGGER.debug("[INFO:] active channel lineup detail %s",
                     active_chl_lineup)

        response = v2pc_api.get_channel_line_up(active_chl_lineup)
        LOGGER.debug("[DEBUG:] response %s", response)
        response.raise_for_status()
        LOGGER.debug("[DEBUG:] response content %s", response.content)
        avail_chanl_lineup_dict = json.loads(response.content)

        start_stamp = datetime.utcnow().strftime(TimeFormat.TIME_FORMAT_LOG)
        # Remove the Channel from Lineup
        LOGGER.info("Step 1: Remove existing channel from line up")
        response = v2pc_api.remove_channel_from_channel_line_up(
            stream, avail_chanl_lineup_dict)
        LOGGER.debug("[DEBUG:] response %s", response)
        assert response, ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
            response.status_code, response.reason, response.url)
        LOGGER.debug("[DEBUG:] response content %s", response.content)
        original_lineup = json.loads(response.content)

        LOGGER.info("Step 1.1: Validate MCE for Core dump")
        is_valid, msg = utils.core_dump("mce")
        assert is_valid, msg

        LOGGER.info("Step 2: Verify Channel not being captured")
        is_avail = v2pc.is_channel_available(stream)
        assert not is_avail, ValidationError.CHANNEL_EXIST.format(stream)

        # Get the channel details to update
        original_channel_details = v2pc_api.get_channel_source(stream)
        LOGGER.debug("Channel Info : %s", original_channel_details.content)

        LOGGER.info("Step 2.1: Adding channel back to channel line up")
        update_lineup = v2pc_api.add_channel_to_channel_line_up(
            stream, avail_chanl_lineup_dict)
        assert update_lineup, ValidationError.INCORRECT_HTTP_RESPONSE_STATUS_CODE.format(
            update_lineup.status_code, update_lineup.reason, update_lineup.url)

        LOGGER.info("Step 3: Verify channel is being moving to Capture State")
        LOGGER.info("[INFO:] Waits till stream to move to capture state ")
        is_valid, error = v2pc.waits_till_channel_capture(stream)
        assert is_valid, error

        result, response = v2pc.restart_media_workflow(
            generic_confg[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response
        result, response = v2pc.waits_till_workflow_active(
            generic_confg[Component.WORK_FLOW][Component.WORKFLOW_NAME])
        assert result, response

        end_stamp = datetime.utcnow().strftime(TimeFormat.TIME_FORMAT_LOG)
        rec_buffer_time = utils.get_rec_duration(
            dur_confg_key=Component.REC_BUFFER_LEN_IN_SEC)
        rec_duration = utils.get_rec_duration(
            dur_confg_key=Component.SHORT_REC_LEN_IN_SEC)
        start_time = utils.get_formatted_time(
            (constants.SECONDS * rec_buffer_time), TimeFormat.TIME_FORMAT_MS)
        end_time = utils.get_formatted_time(
            (constants.SECONDS * (rec_buffer_time + rec_duration)),
            TimeFormat.TIME_FORMAT_MS)
        copy_type = RecordingAttribute.COPY_TYPE_UNIQUE
        LOGGER.debug("Stream Id : %s", stream)
        recording = recording_model.Recording(StartTime=start_time,
                                              EndTime=end_time,
                                              copyType=copy_type,
                                              StreamId=stream)
        recording_id = recording.get_entry(0).RecordingId
        LOGGER.info("Second Recording Id :%s", recording_id)

        LOGGER.info("STEP 3.1: NO ERROR IN MCE LOGS")
        is_valid, error = v2pc.collect_error_log(start_stamp,
                                                 end_stamp,
                                                 TestLog.MCE_ERROR_LOG_PATH,
                                                 recording_id,
                                                 component=Component.MCE,
                                                 is_error=True)
        assert is_valid, error

        LOGGER.info("Step 3.2: Validate MCE for Core dump")
        is_valid, msg = utils.core_dump("mce")
        assert is_valid, msg

        # Create a notification handler
        web_service_obj = notification_utils.get_web_service_object(
            recording_id)
        recording.get_entry(0).UpdateUrl = web_service_obj.get_url()
        LOGGER.debug("Recording instance created : %s", recording.serialize())

        LOGGER.info(
            "Step 4: Create recording on the updated channel and verify its completed"
        )
        response = a8.create_recording(recording)
        is_valid, error = validate_common.validate_http_response_status_code(
            response, requests.codes.no_content)
        assert is_valid, error

        is_valid, error = validate_recordings.validate_recording(
            recording_id, web_service_obj)
        assert is_valid, error

    finally:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s",
                         response.status_code)

        if active_chl_lineup:
            response = v2pc_api.get_channel_line_up(active_chl_lineup)
            assert response, 'unable to fetch channel line up detail'
            avail_chanl_lineup_dict = json.loads(response.content)
            update_lineup = v2pc_api.add_channel_to_channel_line_up(
                stream, avail_chanl_lineup_dict)
            assert update_lineup, 'unable to revert the channel line up'
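
The try/finally cleanup repeated across these tests (stop the notification server, delete the recording, revert configuration) could equally live in a pytest fixture. A minimal sketch, assuming the same a8 and notification helpers (the fixture name is hypothetical):

import pytest

@pytest.fixture
def recording_cleanup():
    """Hypothetical fixture: collect (recording, web_service_obj) pairs and
    tear them down after the test, mirroring the finally blocks above."""
    created = []
    yield created
    for recording, web_service_obj in created:
        if web_service_obj:
            web_service_obj.stop_server()
        if recording:
            response = a8.delete_recording(recording)
            LOGGER.debug("Recording clean up status code=%s", response.status_code)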
Example #8
def setup(request):
    """
    Set up the environment for the test run. This is executed automatically before every test run.
    :param request: the incoming context
    """
    def teardown():
        """
        Clean up the environment after the test run. This runs once all the tests have finished.
        """
        test_setup.prepare_test_summary(request)

    request.addfinalizer(teardown)

    base_path = os.path.join(utils.get_base_dir_path(),
                             TestDirectory.CONFIG_DIR)
    kubeconfig_path = os.path.join(base_path, VMR_KUBECONFIG)
    config.load_kube_config(os.path.join(os.environ["HOME"], kubeconfig_path))

    # Clean up the VLE log and update the vle_default.conf interface value based on the system interface name
    utils.vle_log_cleanup()
    utils.vle_default_config_interface_change()

    times_are_synchronized, error = test_setup.are_times_synchronized()
    if not times_are_synchronized:
        pytest.fail(error)

    LOGGER.info(
        "All components including the test machine are time synchronized as required"
    )

    playback_host_resolved, error = test_setup.is_playback_host_resolved()
    if not playback_host_resolved:
        pytest.fail(error)

    streams_info, drift_avail, stream_drift_info, drift_applicable = test_setup.find_live_point_drift()
    generic_direct_config = utils.get_direct_config()
    generic_config = utils.get_spec_config()

    if not drift_applicable:
        warnings.warn(
            UserWarning(stream_drift_info +
                        'Not applicable drift streams removed.'))

    # Fail if time drift present in stream, but drift handle is False
    if not (generic_direct_config.get(Component.DRIFT_HANDLE,
                                      False)) and drift_avail:
        message = stream_drift_info + " stream has drift but drift handle is set to False, hence failing the test execution"
        LOGGER.error("Stream -> drift: %s" % stream_drift_info)
        LOGGER.error(message)
        pytest.exit(message)

    # Expose the stream details as environment variable
    if generic_direct_config.get(Component.DRIFT_HANDLE, False):
        utils.set_pytest_stream_info(streams_info)

    if utils.is_v2pc():
        v2pc_auth_token = utils.get_v2pc_api_auth_token()
        if not v2pc_auth_token:
            pytest.fail("Unable to fetch V2PC authorization token")
        constants.V2pc.auth_token = v2pc_auth_token
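
For reference, the request.addfinalizer registration at the top of setup is equivalent to a yield-style fixture, which keeps setup and teardown in one body. A minimal sketch of the same ordering:

import pytest

@pytest.fixture(autouse=True)
def setup_alternative(request):
    # ... environment preparation from setup() would go here ...
    yield
    # Runs as teardown once the fixture's scope ends, like the
    # addfinalizer-registered teardown above.
    test_setup.prepare_test_summary(request)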