def dump_a2pats_file(model_, folder):
    # type: (model, PathLike) -> bool
    '''
    Dump single A2PATS object

    Parameters:
     * `model_`: (model class object) Model object in A2PATS
     * `folder`: (string or path like string) Location to dump files
    
    **Returns**: (boolean) True on success
    '''
    logger.debug('Now exporting A2PATS model {}'.format(model_.name))
    filename = '{}.{}'.format(model_.name, MODEL_FILES[model_.type].lower())
    # NOTE: os.path.join(path, *paths) does not accept every kind of
    # PathLike object on legacy versions of Python (pre-3.5). Maintainers
    # can swap the call below for os.path.normpath or os.path.realpath if
    # that ever causes issues.
    filepath = join(folder, filename)
    header = to_str_section(model_)
    sections = [header, ''] + model_.converted_data
    try:
        with open(filepath, 'w') as fp:
            fp.write('\n'.join(sections))
        return True
    except OSError:
        logger.error('Failed to write {}'.format(filepath))
        return False
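# A minimal usage sketch for dump_a2pats_file; the 'ANTENNA' model type and
# its mapping to an '.ant' extension via MODEL_FILES are assumptions for
# illustration, not taken from this codebase:
#
#     m = model('ANTENNA', 'Emitter01', timestamp)
#     m.converted_data = ['GAIN 30', 'POLARIZATION V']
#     dump_a2pats_file(m, 'out')  # writes out/Emitter01.ant, returns True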
def split_header_auto(table, default=DEF_HDRRW_CNT):
    # type: (List[List[str]], int) -> Tuple[List[List[str]], int]
    '''
    Automatically detect header height

    Parameters:
     * `table`: (list of list of strings) 2D matrix used to create the table
     * `default`: (int) default number of header rows
    
    **Returns**: (tuple of list of list of strings and integer) Formatted table and number of header rows
    '''
    # TODO: implement real header-height detection; for now the first row
    # is always split into a name row and a trailing-word (unit) row
    logger.debug(
        'Header height detection not implemented, default is {}'.format(default))
    if len(table) < 1:
        logger.warning(
            'Table was empty, nothing to do in app.util.tables.split_header_auto'
        )
        return table, 0
    headers = [s.rsplit(' ', 1) for s in table[0]]
    headers_n = [[h[0] for h in headers],
                 [h[1] if len(h) > 1 else '' for h in headers]]
    if len(table) < 2:
        logger.warning('Table data was empty, but headers were rearranged')
        return headers_n, len(headers_n)
    return headers_n + table[1:], len(headers_n)
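# A quick sanity sketch for split_header_auto: each header cell is rsplit
# into a name row plus a trailing-word (unit) row:
#
#     table = [['Freq MHz', 'Power dBm'], ['100', '-10']]
#     split_header_auto(table)
#     # -> ([['Freq', 'Power'], ['MHz', 'dBm'], ['100', '-10']], 2)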
def traverse_xml_tree(parent, stack_size=0):
    # type: (ElementTree.Element, int) -> dict
    '''
    Traverse the XML tree and return it as a dictionary

    Parameters:
     * `parent`: Element tree to traverse
     * `stack_size`: (integer, optional) Stack size
    
    **Returns**: (dictionary) XML tree as dictionary
    '''
    if stack_size == 0:
        logger.debug('Now traversing XML tree beginning at initial stack')
    data = dict()
    for child in parent:
        text = child.text.strip() if child.text else ''
        if not text:
            # No direct text, so recurse into the child element
            text = traverse_xml_tree(child, stack_size + 1)
        if child.tag in data:
            if type(data[child.tag]) is not list:
                data[child.tag] = [data[child.tag]]
            data[child.tag].append(text)
        else:
            data[child.tag] = text
    return data
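# Repeated tags collapse into a list in the child.tag branch above; a
# minimal check using the standard library parser:
#
#     from xml.etree import ElementTree
#     root = ElementTree.fromstring('<a><b>1</b><b>2</b><c>x</c></a>')
#     traverse_xml_tree(root)  # -> {'b': ['1', '2'], 'c': 'x'}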
def convert(input_file, output_file):
    # type: (str, str) -> a2pats
    '''
    Load CEESIM file and convert to A²PATS. Output files that do not
    exist are created automatically.

    Parameters:
     * `input_file`: (string) Location of file to import
     * `output_file`: (string) Location of file to export (does not have to exist)

    **Returns**: (a2pats class object) A²PATS object (if successful)
    '''
    logger.debug(
        'Main app converter called, using provided input and output files')
    input_data = import_(input_file, ceesim)  # -> CEESIM object
    emitter_modes = split_emitter_modes(input_data.imported_data)
    output_data = None
    for i, emitter_mode in enumerate(emitter_modes):
        mode_num = str(i + 1).zfill(2)
        logger.debug("Working on emitter mode {}".format(mode_num))
        scan_type = determine_scan_type(emitter_mode)
        logger.info("Scan Type: {}".format(scan_type))
        path_to_tables = "data/c_tables/"
        lookup_table = prepare_lookup_table(
            path_to_tables + scan_file_name[scan_type] + "base.csv",
            path_to_tables + scan_file_name[scan_type] + "base.json")
        output_data = convert_to_a2pats(emitter_mode, lookup_table)
        if scan_type == "HELICAL":  # stacked circular scans need manual edits
            mode_num += " (Needs Edit)"
        dump_a2pats(output_data, join(output_file, mode_num))
    return output_data
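# A sketch of the expected call (paths are hypothetical):
#
#     convert('input/scenario.xml', 'output/scenario')
#     # one subfolder per emitter mode is written under output/scenario
#     # ('01', '02', ...), and the last converted a2pats object is returned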
def flatten_table(ceesim_data, stack_size=1):
    # type: (dict, int) -> dict
    '''
    Flattens a table

    Parameters:
     * `ceesim_data`: (dictionary) Import ceesim_data from JSON
     * `stack_size`: (int) Number of function calls in recursive mode
    
    **Returns**: (dictionary) Flattened 1 level table
    '''
    logger.debug(
        'Flattening input table with call stack size at {}'.format(stack_size))
    data_ = dict()
    for key in ceesim_data:
        if key not in data_:
            if type(ceesim_data[key]) not in {dict, list}:
                logger.debug(
                    '{} was not in data, and was added to the flat table'.format(key))
                data_[key] = ceesim_data[key]
            else:
                logger.debug(
                    'Now processing lookup table for subkeys of {}'.format(key))
                frame = ceesim_data[key]
                if type(frame) is list:
                    # Only descend into a list whose first item is a dict
                    if len(frame) < 1 or type(frame[0]) is not dict:
                        continue
                    frame = frame[0]
                subdict = flatten_table(frame, stack_size + 1)
                subdict.update(data_)
                data_ = subdict
    logger.debug('Now returning flat table with size of {}'.format(len(data_)))
    return data_
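# Flattening example: nested dicts collapse to one level, and because
# subdict.update(data_) runs after the recursion, keys already present at
# the top level win over nested duplicates:
#
#     flatten_table({'A': 1, 'B': {'A': 2, 'C': 3}})
#     # -> {'A': 1, 'C': 3}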
def import_ceesim(fp):
    # type: (TextIO) -> ceesim
    '''
    Import a CEESIM file for conversion

    Parameters:
     * `fp`: File pointer

    **Returns**: Imported CEESIM object
    '''
    logger.debug(
        'CEESIM importer called, beginning CEESIM import with XML file')
    itr = ElementTree.iterparse(fp)
    strip_xml_namespaces(itr)
    data = traverse_xml_tree(itr.root)
    store = ceesim(data)
    return store
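# Usage sketch (the path is hypothetical):
#
#     with open('data/scenario.xml') as fp:
#         store = import_ceesim(fp)
#     # store.imported_data holds the dict built by traverse_xml_tree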
def strip_xml_namespaces(itr):
    # type: (Iterator) -> None
    '''
    Strip XML namespaces from an iterator provided by XML parser

    Parameters:
     * `itr`: Iterator from an ElementTree
    
    **Returns**: None, strip happens in place
    '''
    logger.debug('Stripping all namespaces from imported CEESIM file')
    for _, element in itr:
        if '}' in element.tag:
            element.tag = element.tag.split('}', 1)[1]
        for attribute in list(element.attrib.keys()):
            if '}' in attribute:
                new_attribute = attribute.split('}', 1)[1]
                element.attrib[new_attribute] = element.attrib[attribute]
                del element.attrib[attribute]
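# Example: a tag like '{http://example.com/ns}EmitterMode' becomes
# 'EmitterMode' after the split on '}' above; attribute names are rewritten
# the same way, in place, as the iterator is consumed:
#
#     itr = ElementTree.iterparse('data/scenario.xml')  # hypothetical path
#     strip_xml_namespaces(itr)
#     # itr.root now carries namespace-free tags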
def convert_to_a2pats(ceesim_data, lookup_table):
    # type: (ceesim, dict) -> a2pats
    '''
    Convert CEESIM data to A²PATS data

    Parameters:
     * `ceesim_data`: (ceesim class object) CEESIM emitter mode data to import
     * `lookup_table`: (dictionary) Lookup table JSON
    
    **Returns**: (a2pats class object) A2PATS data
    '''
    logger.info('Beginning CEESIM to A2PATS conversion')
    flattened_data = flatten_table(ceesim_data)
    store = a2pats(imported_type='A2PATS')
    generic_models = generate_models(
        ceesim_data, flattened_data, lookup_table)
    logger.debug('Added {} models from generate_models'.format(
        len(generic_models)))
    store.models += generic_models
    return store
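# Conversion sketch in isolation (table paths are hypothetical; compare the
# prepare_lookup_table call in convert above):
#
#     lut = prepare_lookup_table('data/c_tables/circular_base.csv',
#                                'data/c_tables/circular_base.json')
#     store = convert_to_a2pats(emitter_mode, lut)
#     store.models  # populated by generate_models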
def dump_a2pats(obj, folder):
    # type: (a2pats, PathLike) -> bool
    '''
    Dump A2PATS object

    Parameters:
     * `obj`: (a2pats class object) A2PATS object
     * `folder`: (str or Path like object) Folder location to export
    
    **Returns**: (boolean) True on success
    '''
    if isfile(folder):
        logger.error('{} is a filepath, a folder cannot be placed here. Aborting'.format(folder))
        return False
    if not isdir(folder):
        logger.debug('{} does not exist, creating folder'.format(folder))
        mkdir(folder)
    logger.info('Now exporting A2PATS to folder {}'.format(folder))
    for model_ in obj.models:
        dump_a2pats_file(model_, folder)
    return True
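# dump() below normally dispatches here, but dump_a2pats can also be called
# directly with any a2pats object and a target folder:
#
#     dump_a2pats(store, 'export/emitter01')  # creates the folder if needed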
def dump(obj, folder, export_type=a2pats):
    # type: (datastore, PathLike, type) -> bool
    '''Dump data dynamically

    Parameters:
     * `obj`: (datastore class object) Datastore object
     * `folder`: (str or path like object) Folder location of export
     * `export_type`: (class) Export type
    
    **Returns**: (boolean) True on success
    '''
    logger.debug('Generic dump function called, auto-detecting type...')
    assert issubclass(
        type(obj), datastore), 'Your data must be a datastore-like object!'
    if type(obj) is a2pats:
        logger.debug('Detected A2PATS, exporting as A2PATS')
        return dump_a2pats(obj, folder)
    elif type(obj) is ceesim:
        logger.debug('Detected CEESIM, but CEESIM export is not implemented')
        return False
    assert type(
        obj) is not datastore, 'You cannot auto-dump a plain datastore object!'
    return False
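# Dispatch sketch for dump(): a2pats objects are exported, CEESIM export is
# unimplemented, and a plain datastore trips the assertion:
#
#     dump(a2pats_obj, 'export')   # -> True on success
#     dump(ceesim_obj, 'export')   # -> False (export not implemented)
#     dump(datastore(), 'export')  # -> AssertionError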
def import_(fp, classtype=datastore, downgrade_peaceful=True):
    # type: (Union[str, TextIO], type, bool) -> datastore
    '''Import data dynamically

    Parameters:
     * `fp`: (string or file-like pointer) File path or open file to import
     * `classtype`: (class) Class type to import as (must be datastore or datastore-like)
     * `downgrade_peaceful`: (boolean) Whether or not to ignore errors

    **Returns**: (datastore or datastore-like) Imported datastore object
    '''
    logger.debug(
        'Generic importer called, automatically detecting import type')
    if downgrade_peaceful:
        if not issubclass(classtype, datastore):
            # Assume CEESIM import
            logger.debug('Unable to determine type, assuming CEESIM')
            classtype = ceesim
        if type(fp) is str:
            logger.debug('Now opening file {} for import'.format(fp))
            try:
                fp = open(fp, encoding='utf-8')
            except OSError:
                logger.debug('Failed to open, returning empty object')
                return datastore()
        elif not isinstance(fp, IOBase):
            logger.debug('Unrecognizable file pointer, returning empty object')
            return datastore()
    else:
        assert issubclass(
            classtype, datastore
        ), 'Your import_ call must be of datastore or datastore-like type!'
        if type(fp) is str:
            logger.debug('Now opening file {} for import'.format(fp))
            fp = open(fp, encoding='utf-8')
        assert isinstance(
            fp, IOBase
        ), 'Your import_ call must provide a valid file-like pointer or file path!'

    if classtype is a2pats:
        logger.debug('A2PATS file provided, importing as A2PATS')
        return import_a2pats(fp)
    elif classtype is ceesim:
        logger.debug('CEESIM file provided, importing as CEESIM')
        return import_ceesim(fp)
    else:
        raise ValueError('This type of datastore isn\'t supported yet!')
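# Usage sketch for the generic importer (paths are hypothetical):
#
#     store = import_('data/scenario.xml', ceesim)  # opens, parses, wraps
#     store = import_('missing.xml', ceesim)        # unreadable file returns
#     # an empty datastore() because downgrade_peaceful defaults to True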
def generate_models(ceesim_data, ceesim_flattened, lookup_table):
    # type: (dict, dict, dict) -> list
    '''
    Generate all non INP/PUL models

    Parameters:
     * `ceesim_data`: (dictionary) Imported CEESIM JSON data
     * `ceesim_flattened`: (dictionary) Flattened CEESIM JSON data
     * `lookup_table`: (dictionary) Lookup table JSON imported
    
    **Returns**: (list) List of models
    '''
    logger.debug('Now generating all non-signal models')

    def fill_table(model, pos, value):
        # type: (model, int, str) -> None
        '''
        Fills out table from model with values

        Parameters:
         * `model`: (model class object) Model object
         * `pos`: (int) Position index
         * `value`: (str) Value to fill
        
        **Returns**: None, table is filled in-place
        '''
        if len(model.converted_data) <= pos:
            model.converted_data += [''] * \
                (pos - len(model.converted_data) + 1)
        model.converted_data[pos] = value

    def create_converted(model, opt):
        # type: (model, list) -> None
        '''
        Converts in-place model

        Parameters:
         * `model`: (model class object) Model object
         * `opt`: (list) Options from lookup table
        
        **Returns**: None, table is filled in place
        '''

        if type(opt["TABLE"]) is int:
            table_string = build_table_str(ceesim_data, lookup_table, opt[FILE_HDR], opt["SECTION"], 
                                        opt[PRI_HDR], convert_one_key, obtain_relevant_tags, opt[FILE_HDR] == "ANT")
            fill_table(model, opt[PRI_HDR], table_string)
            return None # No other conversion should occur after building table

        values = []
        for tag in opt[TAG_HDR].split('&'):
            tags = obtain_relevant_tags(ceesim_data, ceesim_flattened, tag) # removed taking the zero-index as it's done below
            if tags:
                value = tags[0]  
                logger.debug("Found tag {} for {} in {} with value: {}".format(opt[TAG_HDR],
                                                                            opt["LABEL"], opt[FILE_HDR], value))
            else:
                if opt[TAG_HDR]:
                    logger.debug('Could not find tag {}, using default value for {} in {}: {}'.format(opt[TAG_HDR],
                                                                                                    opt["LABEL"], opt[FILE_HDR], opt[DEFAULT_HDR]))
                else:
                    logger.debug('Using default value for tagless {} in {}: {}'.format(
                        opt["LABEL"], opt[FILE_HDR], opt[DEFAULT_HDR]))
                value = opt[DEFAULT_HDR]
            values.append(value)

        converted = convert_one_key(opt, values)
        fill_table(model, opt[PRI_HDR], converted) # adds to model.converted_data


    def add_headers(mfile, model):
        # type: (str, model) -> None
        '''
        Adds headers to converted model

        Parameters:
         * `mfile`: (string) Name of file
         * `model`: (model) Model class object

        **Returns**: None, data is converted in place
        '''
        with open("data/headers.csv") as head:
            headers = reader(head)
            for header in headers:
                if header[0] == mfile:
                    left = (int(header[2]) - len(header[1]) - 3) / 2
                    lleft = int(left)
                    right = lleft
                    if lleft == left:
                        left = lleft
                        right = lleft + 1
                    htext = "//" + ' '.join(
                        ['*' * lleft, header[1], '*' * right])
                    h_priority = int(header[3])
                    if determine_scan_type(ceesim_data) == "LORO" and mfile == "SIG" and (header[1] == "ANTENNA MODEL" or header[1] == "MULTIPLE SIMULTANEOUS SIGNALS"):
                        # Resolves a downstream feature displacement that occurs from the inclusion of all scan data within the .sig file's SCAN MODEL
                        h_priority += 3
                    fill_table(model, h_priority, htext)
    models = list()
    name = form_model_name(ceesim_data, ceesim_flattened)
    timestamp = obtain_relevant_tags(ceesim_data, ceesim_flattened, "LastUpdateDate")[0]
    logger.debug('Generic model generator using name: {}'.format(name))
    for mtype in AUTO_MODELS:
        next_model = model(mtype, name, timestamp)
        if mtype not in MODEL_FILES:
            logger.warning(
                'Could not find mtype {} in model files, skipping'.format(mtype))
            continue
        table_key = MODEL_FILES[mtype]
        add_headers(table_key, next_model)
        if table_key not in lookup_table:
            logger.warning(
                'Could not find key {} in lookup table, skipping'.format(table_key))
            continue
        logger.debug(
            'Now processing table key {} with mtype {}'.format(table_key, mtype))
        for cdict_key in lookup_table[table_key]:
            key_data = lookup_table[table_key][cdict_key]
            if MULTI_HDR in key_data and TABLE_DATA in key_data:
                data_opts = key_data[TABLE_DATA]
                for opt in data_opts:
                    if opt["SECTION"] == "Main":
                        continue
                    create_converted(next_model, opt)
            else:
                if key_data["SECTION"] == "Main":
                    continue
                create_converted(next_model, key_data)

        models.append(next_model)

    # Handle intrapulse (INP) models here
    inp_deviations = obtain_relevant_tags(ceesim_data, ceesim_flattened, "LinearFreqDeviation")
    inp_durations = obtain_relevant_tags(ceesim_data, ceesim_flattened, "LinearFreqDuration")
    inp_info = zip(inp_deviations, inp_durations)

    for i, info in enumerate(inp_info):
        new_name = "{}-{}".format(name, i + 1)
        next_model = model('INTRAPULSE', new_name, timestamp)
        table_key = MODEL_FILES['INTRAPULSE']
        add_headers(table_key, next_model)
        if table_key not in lookup_table:
            logger.warning(
                'Could not find key {} in lookup table, skipping'.format(table_key))
            continue
        logger.debug(
            'Now processing table key {} with mtype {}'.format(table_key, 'INTRAPULSE'))

        for cdict_key in lookup_table[table_key]:
            key_data = lookup_table[table_key][cdict_key]
            if MULTI_HDR in key_data and TABLE_DATA in key_data:
                data_opts = key_data[TABLE_DATA]
                for opt in data_opts:
                    if opt["SECTION"] == "Main":
                        continue
                    create_converted(next_model, opt)
            else:
                if key_data["SECTION"] == "Main":
                    continue
                if not key_data["TAG"]:
                    create_converted(next_model, key_data)
                else:
                    if key_data["TAG"] == "LinearFreqDeviation":
                        fill_table(next_model, key_data[PRI_HDR], convert_one_key(key_data, [info[0]]))
                    if key_data["TAG"] == "LinearFreqDeviation&LinearFreqDuration":
                        fill_table(next_model, key_data[PRI_HDR], convert_one_key(key_data, info))

        models.append(next_model)

    return models
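# Call sketch, mirroring convert_to_a2pats above: the flattened view comes
# from flatten_table and the lookup table from prepare_lookup_table:
#
#     flat = flatten_table(mode_data)
#     models = generate_models(mode_data, flat, lookup_table)
#     # one model per entry in AUTO_MODELS, plus one INTRAPULSE model per
#     # LinearFreqDeviation/LinearFreqDuration pair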
async def shutdown_events():
    '''Log application shutdown'''
    logger.debug("Shutdown!")
async def startup_event():
    '''Log application startup and initialize the database'''
    logger.debug("App startup!")
    init_db(app)
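# These handlers look like FastAPI lifecycle hooks (the async signatures and
# init_db(app) suggest it); if so, a typical registration, which is an
# assumption rather than something shown in this codebase, would be:
#
#     app.add_event_handler('startup', startup_event)
#     app.add_event_handler('shutdown', shutdown_events)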