Example #1
def test_json_response_generation(xls_input, expected_response_file):
    """ tests if json response is correctly generated for all combinations of requests
    """
    data = convert_service_sheet(xls_input, eqpt_filename)
    equipment = load_equipment(eqpt_filename)
    network = load_network(xls_input, equipment)
    p_db = equipment['SI']['default'].power_dbm

    p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,
                                             equipment['SI']['default'].f_max,
                                             equipment['SI']['default'].spacing))
    build_network(network, equipment, p_db, p_total_db)
    rqs = requests_from_json(data, equipment)
    rqs = correct_route_list(network, rqs)
    dsjn = disjunctions_from_json(data)
    dsjn = correct_disjn(dsjn)
    rqs, dsjn = requests_aggregation(rqs, dsjn)
    pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
    propagatedpths = compute_path_with_disjunction(network, equipment, rqs, pths)
    result = []
    for i, pth in enumerate(propagatedpths):
        result.append(Result_element(rqs[i], pth))
    temp = {
        'response': [n.json for n in result]
    }
    # load expected result and compare keys
    # (not values at this stage)
    with open(expected_response_file) as jsonfile:
        expected = load(jsonfile)

    for i, response in enumerate(temp['response']):
        assert compare_response(expected['response'][i], response)
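The temp dictionary built here has the same layout as the expected-response file, so the standard library is enough to regenerate such a fixture. A minimal sketch, assuming a hypothetical output file name:

from json import dump

def dump_responses(responses, output_path):
    # write the {'response': [...]} dictionary in the same layout as the expected file
    with open(output_path, 'w', encoding='utf-8') as outfile:
        dump(responses, outfile, indent=2)

# e.g. dump_responses(temp, 'actual_response.json')  # hypothetical file name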
Example #2
def load_requests(filename, eqpt_filename):
    """ loads the requests from a JSON or an Excel file into a dict
    """
    if filename.suffix.lower() == '.xls':
        logger.info('Automatically converting requests from XLS to JSON')
        json_data = convert_service_sheet(filename, eqpt_filename)
    else:
        with open(filename, encoding='utf-8') as f:
            json_data = loads(f.read())
    return json_data
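Because load_requests inspects filename.suffix, it expects a pathlib.Path rather than a plain string. A minimal usage sketch, with hypothetical file names:

from pathlib import Path

# an .xls sheet is converted on the fly, anything else is parsed as JSON
requests = load_requests(Path('service_sheet.xls'), 'eqpt_config.json')  # hypothetical inputs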
Example #3
def test_excel_service_json_generation(xls_input, expected_json_output):
    """ tests that the service JSON generated from the Excel sheet matches the expected file
    """
    convert_service_sheet(xls_input, eqpt_filename)

    actual_json_output = f'{str(xls_input)[:-4]}_services.json'
    with open(actual_json_output, encoding='utf-8') as f:
        actual = load(f)
    unlink(actual_json_output)

    with open(expected_json_output, encoding='utf-8') as f:
        expected = load(f)

    results = compare_services(expected, actual)
    assert not results.requests.missing
    assert not results.requests.extra
    assert not results.requests.different
    assert not results.synchronizations.missing
    assert not results.synchronizations.extra
    assert not results.synchronizations.different
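The xls_input / expected_json_output pair is normally supplied by a pytest parametrization placed directly above the test function. A minimal sketch, with hypothetical fixture paths:

import pytest
from pathlib import Path

DATA_DIR = Path(__file__).parent / 'data'  # hypothetical location of the test fixtures

@pytest.mark.parametrize('xls_input, expected_json_output', [
    (DATA_DIR / 'testService.xls', DATA_DIR / 'testService_services_expected.json'),
])
def test_excel_service_json_generation(xls_input, expected_json_output):
    ...  # body as in the example above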
Example #4
def load_requests(filename, eqpt_filename, bidir):
    """ loads the requests from a json or an excel file into a data string
    """
    if filename.suffix.lower() == '.xls':
        LOGGER.info('Automatically converting requests from XLS to JSON')
        try:
            json_data = convert_service_sheet(filename,
                                              eqpt_filename,
                                              bidir=bidir)
        except ServiceError as this_e:
            print(f'{ansi_escapes.red}Service error:{ansi_escapes.reset} {this_e}')
            exit(1)
    else:
        with open(filename, encoding='utf-8') as my_f:
            json_data = loads(my_f.read())
    return json_data
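In a command-line wrapper the bidir argument would typically come from an argparse flag, in the same spirit as the script in the next example. A minimal sketch, with hypothetical option names:

from argparse import ArgumentParser
from pathlib import Path

parser = ArgumentParser()
parser.add_argument('service_filename', type=Path)   # XLS or JSON request file
parser.add_argument('--bidir', action='store_true')  # hypothetical flag name
args = parser.parse_args()

json_data = load_requests(args.service_filename, 'eqpt_config.json', args.bidir)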
Example #5
# -*- coding: utf-8 -*-

"""
convert_service_sheet.py
========================

XLS parser that can be called to create a JSON request file in accordance with
Yang model for requesting path computation.

See: draft-ietf-teas-yang-path-computation-01.txt
"""

from argparse import ArgumentParser
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
from json import dumps

from gnpy.core.service_sheet import Request, Element, Request_element
from gnpy.core.service_sheet import parse_row, parse_excel, convert_service_sheet

logger = getLogger(__name__)

parser = ArgumentParser()
parser.add_argument('workbook', help='XLS service sheet to convert')
parser.add_argument('-o', '--output', default=None, help='output JSON file (default: print to stdout)')
parser.add_argument('-v', '--verbose', action='count', default=0)

if __name__ == '__main__':
    args = parser.parse_args()
    basicConfig(level={2: DEBUG, 1: INFO, 0: CRITICAL}.get(args.verbose, CRITICAL))
    logger.info(f'Converting Service sheet {args.workbook!r} into gnpy JSON format')
    if args.output is None:
        data = convert_service_sheet(args.workbook, 'eqpt_config.json')
        print(dumps(data, indent=2))
    else:
        data = convert_service_sheet(args.workbook, 'eqpt_config.json', args.output)
Example #6
def test_json_response_generation(xls_input, expected_response_file):
    """ tests if json response is correctly generated for all combinations of requests
    """
    data = convert_service_sheet(xls_input, eqpt_filename)
    # change one of the requests to bidirectional to also cover the bidir case
    data['path-request'][2]['bidirectional'] = True

    equipment = load_equipment(eqpt_filename)
    network = load_network(xls_input, equipment)
    p_db = equipment['SI']['default'].power_dbm

    p_total_db = p_db + lin2db(automatic_nch(equipment['SI']['default'].f_min,
                                             equipment['SI']['default'].f_max,
                                             equipment['SI']['default'].spacing))
    build_network(network, equipment, p_db, p_total_db)
    oms_list = build_oms_list(network, equipment)
    rqs = requests_from_json(data, equipment)
    rqs = correct_route_list(network, rqs)
    dsjn = disjunctions_from_json(data)
    dsjn = correct_disjn(dsjn)
    rqs, dsjn = requests_aggregation(rqs, dsjn)
    pths = compute_path_dsjctn(network, equipment, rqs, dsjn)
    propagatedpths, reversed_pths, reversed_propagatedpths = \
        compute_path_with_disjunction(network, equipment, rqs, pths)
    pth_assign_spectrum(pths, rqs, oms_list, reversed_pths)

    result = []
    for i, pth in enumerate(propagatedpths):
        # test ServiceError handling: when M is zero at this point, building the
        # JSON result must raise unless there is a blocking reason
        if i == 1:
            my_rq = deepcopy(rqs[i])
            my_rq.M = 0
            with pytest.raises(ServiceError):
                Result_element(my_rq, pth, reversed_propagatedpths[i]).json

            # with a blocking reason set, building the response must succeed
            my_rq.blocking_reason = 'NO_SPECTRUM'
            Result_element(my_rq, pth, reversed_propagatedpths[i]).json

        result.append(Result_element(rqs[i], pth, reversed_propagatedpths[i]))

    temp = {'response': [n.json for n in result]}
    # load expected result and compare keys and values

    with open(expected_response_file) as jsonfile:
        expected = load(jsonfile)
        # since we changed the bidir attribute of request #2, the corresponding
        # z-a metrics must be added to the expected response

    for i, response in enumerate(temp['response']):
        if i == 2:
            # compare_response must be False because the z-a metric is missing
            # (this is the request changed to bidirectional above)
            assert not compare_response(expected['response'][i], response)
            print(f'response {response["response-id"]} should not match')
            expected['response'][2]['path-properties']['z-a-path-metric'] = [
                {'metric-type': 'SNR-bandwidth', 'accumulative-value': 22.809999999999999},
                {'metric-type': 'SNR-0.1nm', 'accumulative-value': 26.890000000000001},
                {'metric-type': 'OSNR-bandwidth', 'accumulative-value': 26.239999999999998},
                {'metric-type': 'OSNR-0.1nm', 'accumulative-value': 30.32},
                {'metric-type': 'reference_power', 'accumulative-value': 0.0012589254117941673},
                {'metric-type': 'path_bandwidth', 'accumulative-value': 60000000000.0},
            ]
            # the comparison should succeed now that the z-a metrics are present
            assert compare_response(expected['response'][i], response)
        else:
            assert compare_response(expected['response'][i], response), \
                f'response {response["response-id"]} is not correct'