Example #1
    def test_graph_orm_mapping_update_from_mapping_with_duplicate(self):
        """
        Test updating the node mapping from another ORM mapping
        """

        self.orm.node_mapping.add(ORMtestTgf6, lambda x: x.get('key') == 'six')

        # Build second ORM
        second_orm = GraphORM()
        for cls in (ORMtestTgf9, ORMtestTgf6, ORMtestBi):
            second_orm.node_mapping.add(cls, lambda x: x.get('key') == 'six')

        # Update
        self.orm.node_mapping.update(second_orm.node_mapping)
        self.assertEqual(len(self.orm.node_mapping), 3)
Example #2
    def setUp(self):
        """
        ConfigHandlerTests class setup

        Load the graph from file, assign custom classes to labels and register
        them with the ORM.
        """

        self.graph = read_tgf(self._gpf_graph)

        self.orm = GraphORM()
        self.orm.edge_mapping.add(ORMtestMo, lambda x: x.get('label') == 'mo')
        self.orm.edge_mapping.add(ORMtestBi, lambda x: x.get('label') == 'bi')
        self.orm.node_mapping.add(ORMtestTgf6, lambda x: x.get('key') == 'six')
        self.orm.node_mapping.add(
            ORMtestTgf9,
            lambda x: x.get('key') == 'nine' or x.get('ids') == 'edi')

        self.graph.orm = self.orm
        self.graph.nodes[6]['add'] = 6
        self.graph.nodes[6]['ids'] = 'edi'
Example #3
    def setUp(self):
        """
        Init empty GraphORM object
        """

        self.orm = GraphORM()
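All examples on this page follow the same registration pattern: a matching function (typically a lambda evaluated against a node or edge attribute dictionary) is paired with a custom class in `node_mapping` or `edge_mapping`. A minimal sketch of that pattern, using hypothetical SixNode and BiEdge placeholder classes:

from graphit.graph_orm import GraphORM


class SixNode(object):
    """Hypothetical mixin class, standing in for ORMtestTgf6 and friends."""
    pass


class BiEdge(object):
    """Hypothetical mixin class for edges labelled 'bi'."""
    pass


orm = GraphORM()

# The matching function receives the attribute dictionary of a node or edge
# and returns True when the registered class should be used for that element
orm.node_mapping.add(SixNode, lambda x: x.get('key') == 'six')
orm.edge_mapping.add(BiEdge, lambda x: x.get('label') == 'bi')

# Mappings can be counted and merged from another ORM; duplicate entries are
# not added twice (see example #1)
other_orm = GraphORM()
other_orm.node_mapping.add(SixNode, lambda x: x.get('key') == 'six')
orm.node_mapping.update(other_orm.node_mapping)
print(len(orm.node_mapping))

# Finally the ORM is attached to a graph through its `orm` attribute:
# graph.orm = orm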
Example #4
class LabeledRangeQuadrupleArray(NodeAxisTools):

    def validate(self, key=None):
        if len(self.r.children()) != 4:
            return haddock_validation_warning(self, 'Symmetry restraint should be of length 4')


class LabeledRangeQuintupleArray(NodeAxisTools):

    def validate(self, key=None):
        if len(self.r.children()) != 5:
            return haddock_validation_warning(self, 'Symmetry restraint should be of length 5')


# Predefined HADDOCK ORM mapper
haddock_orm = GraphORM()
haddock_orm.node_mapping.add(HaddockRunParameters, lambda x: x.get('haddock_type') == 'HaddockRunParameters')
haddock_orm.node_mapping.add(FloatArray, lambda x: x.get('haddock_type') == 'FloatArray')
haddock_orm.node_mapping.add(Range, lambda x: x.get('haddock_type') == 'Range')
haddock_orm.node_mapping.add(ExtStageConstants, lambda x: x.get('haddock_type') == 'ExtStageConstants')
haddock_orm.node_mapping.add(HaddockPartnerParameters, lambda x: x.get('haddock_type') == 'HaddockPartnerParameters')
haddock_orm.node_mapping.add(CNSRestraintFiles, lambda x: x.get('haddock_type') == 'CNSRestraintFile')
haddock_orm.node_mapping.add(FlexSegmentList, lambda x: x.get('haddock_type') == 'SemiflexSegmentList')
haddock_orm.node_mapping.add(FlexSegmentList, lambda x: x.get('haddock_type') == 'SegmentList')
haddock_orm.node_mapping.add(PDBData, lambda x: x.get('haddock_type') == 'PDBData')
haddock_orm.node_mapping.add(LabeledRangePairArray, lambda x: x.get('haddock_type') == 'LabeledRangePairArray')
haddock_orm.node_mapping.add(LabeledRangeTripleArray, lambda x: x.get('haddock_type') == 'LabeledRangeTripleArray')
haddock_orm.node_mapping.add(LabeledRangeQuadrupleArray, lambda x: x.get('haddock_type') == 'LabeledRangeQuadrupleArray')
haddock_orm.node_mapping.add(LabeledRangeQuintupleArray, lambda x: x.get('haddock_type') == 'LabeledRangeQuintupleArray')

for tbldata in ('tbldata', 'dihedraldata', 'rdcdata', 'danidata', 'tensordata', 'pcsdata'):
Example #5
def write_pydata(graph, default=None, allow_none=True, export_all=False, include_root=False):
    """
    Export a graph to a (nested) dictionary

    Convert graph representation of the dictionary tree into a dictionary
    using a nested representation of the dictionary hierarchy.

    Dictionary keys and values are obtained from the node attributes using
    `key_tag` and `value_tag`. The key_tag defaults to the graph key_tag.
    Exporting only these primary key_tag/value_tag pairs is the default
    behaviour. If a node contains more data, it can be exported as part of
    a dictionary using the `export_all` argument.

    .. note:: `export_all` is important when dictionary data structures were
              imported using level=1 in `read_pydata`. In this case, all
              key/value pairs at the same dictionary level are contained in
              the same node.

    Node values that are None are exported by default unless `allow_none` is
    set to False.
    If the key_tag exists but the value_tag is absent, `default` is used as
    the fallback value.

    .. note:: if a graph is composed out of multiple, independent subgraphs
              only the subgraph for which the root node is defined will be
              exported. To export all, iterate over the subgraphs and define
              the appropriate root for each of them.

    :param graph:          Graph object to export
    :type graph:           :graphit:GraphAxis
    :param default:        value to use when node value was not found using
                           value_tag.
    :type default:         mixed
    :param allow_none:     allow None values in the output
    :type allow_none:      :py:bool
    :param export_all:     Export the full node storage dictionary.
    :type export_all:      :py:bool
    :param include_root:   Include the root node in the hierarchy
    :type include_root:    :py:bool

    :rtype:                :py:dict
    """

    # No nodes, return empty dict
    if graph.empty():
        logging.info('Graph is empty: {0}'.format(repr(graph)))
        return {}

    # Graph should be of type GraphAxis with a root node nid defined
    if not isinstance(graph, GraphAxis):
        raise TypeError('Unsupported graph type {0}'.format(type(graph)))
    if graph.root is None:
        raise GraphitException('No graph root node defined')

    # Build ORM with format specific conversion classes
    pydataorm = GraphORM(inherit=False)
    pydataorm.node_mapping.add(ParseDictionaryType, lambda x: x.get('format') == 'dict')
    pydataorm.node_mapping.add(ParseListType, lambda x: x.get('format') == 'list')
    pydataorm.node_mapping.add(ParseSetType, lambda x: x.get('format') == 'set')
    pydataorm.node_mapping.add(ParseTupleType, lambda x: x.get('format') == 'tuple')

    # Set current ORM aside and register new one.
    curr_orm = graph.orm
    graph.orm = pydataorm

    # Set current NodeTools aside and register new one
    curr_nt = graph.node_tools
    graph.node_tools = PyDataNodeTools

    # Define start node for recursive export
    if len(graph) > 1:
        root = graph.getnodes(resolve_root_node(graph))
    else:
        root = graph.getnodes(list(graph.nodes.keys()))

    # Start recursive parsing
    # If we export the full node dictionary, also export None key/value pairs
    root_key, data = root.serialize(allow_none=True if export_all else allow_none,
                                    export_all=export_all, default=default)

    # Include root_key or not
    if include_root and root_key:
        data = {root_key: data}

    # Restore original ORM and NodeTools
    graph.node_tools = curr_nt
    graph.orm = curr_orm

    return data
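A short usage sketch of the exporter above. The import path and the `read_pydata` counterpart (referenced in the docstring) are assumptions, as they are not part of this excerpt:

# Assumed import path; adjust to the module that defines write_pydata
from graphit.graph_io.io_pydata_format import read_pydata, write_pydata

source = {'project': {'name': 'demo', 'threshold': 0.5, 'tags': ['a', 'b']}}

# Import the dictionary into a GraphAxis hierarchy and export it back
graph = read_pydata(source)
print(write_pydata(graph))

# Whether the root key appears in the result is controlled by `include_root`,
# None values by `allow_none` and full node dictionaries by `export_all`
print(write_pydata(graph, include_root=True, export_all=True))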
Example #6
        query_params = self._process_parameters(kwargs,
                                                self.parameters(loc='query'))
        path_params = self._process_parameters(kwargs,
                                               self.parameters(loc='path'))
        url = self.url()
        url = url.format(**path_params)
        logging.info('Calling: {0}'.format(url))

        http_method = self.http_method()
        if http_method == 'get':
            response = requests.get(url, params=query_params)
        elif http_method == 'post':
            response = requests.post(url, params=query_params)
        else:
            raise NotImplementedError('Unsupported HTTP method: {0}'.format(http_method))

        status_code = str(response.status_code)
        logging.info('Called: {0}'.format(response.url))
        logging.info('Response in {0} with status code {1}'.format(
            response.elapsed, status_code))

        status_response = self.xpath('//{0}'.format(status_code))
        if not status_response.empty():
            logging.info(status_response.description)

        return response.text


OpenAPIORM = GraphORM()
OpenAPIORM.node_mapping.add(OpenAPIMethod,
                            lambda x: x.get('key', '').startswith('/'))
Example #7
            if length > self.get('maxItems', length):
                raise GraphitValidationError(
                    'Length of array {0} ({1}) larger than maximum {2}'.format(
                        key, length, self.get('maxItems')), self)
            if length < self.get('minItems', length):
                raise GraphitValidationError(
                    'Length of array {0} ({1}) smaller than minimum {2}'.format(
                        key, length, self.get('minItems')), self)
            if self.get('uniqueItems', False):
                if len(set(value)) != len(value):
                    raise GraphitValidationError(
                        'Items in array {0} must be unique, got: {1}'.format(
                            key, set(value)), self)

        self.nodes[self.nid][key] = value


JSONSchemaORMDraft07 = GraphORM()
JSONSchemaORMDraft07.node_mapping.add(StringType,
                                      lambda x: x.get('type') == 'string')
JSONSchemaORMDraft07.node_mapping.add(IntegerType,
                                      lambda x: x.get('type') == 'integer')
JSONSchemaORMDraft07.node_mapping.add(NumberType,
                                      lambda x: x.get('type') == 'number')
JSONSchemaORMDraft07.node_mapping.add(BooleanType,
                                      lambda x: x.get('type') == 'boolean')
JSONSchemaORMDraft07.node_mapping.add(ArrayType,
                                      lambda x: x.get('type') == 'array')
JSONSchemaORMDraft07.node_mapping.add(Email,
                                      lambda x: x.get('type') == 'email')
JSONSchemaORMDraft07.node_mapping.add(Email,
                                      lambda x: x.get('type') == 'idn-email')
JSONSchemaORMDraft07.node_mapping.add(DateTime,
Example #8
"""

from graphit.graph_orm import GraphORM
from graphit.graph_model_classes.model_user import User
from graphit.graph_model_classes.model_datetime import DateTime
from graphit.graph_model_classes.model_identifiers import UUID
from graphit.graph_model_classes.model_files import FilePath
from graphit.graph_io.io_jsonschema_format_drafts import StringType

from .task_python_type import PythonTask, BlockingPythonTask, LoadCustomFunc
from .task_wamp_type import WampTask
from .task_loop_type import LoopTask

task_types = ('PythonTask', 'BlockingPythonTask', 'WampTask', 'LoopTask')

# Define the workflow Task ORM
WORKFLOW_ORM = GraphORM(inherit=False)
WORKFLOW_ORM.node_mapping.add(PythonTask, lambda x: x.get('task_type') == 'PythonTask')
WORKFLOW_ORM.node_mapping.add(BlockingPythonTask, lambda x: x.get('task_type') == 'BlockingPythonTask')
WORKFLOW_ORM.node_mapping.add(WampTask, lambda x: x.get('task_type') == 'WampTask')
WORKFLOW_ORM.node_mapping.add(LoopTask, lambda x: x.get('task_type') == 'LoopTask')
WORKFLOW_ORM.node_mapping.add(User, lambda x: x.get('key') == 'user')
WORKFLOW_ORM.node_mapping.add(DateTime, lambda x: x.get('format') == 'date-time')
WORKFLOW_ORM.node_mapping.add(UUID, lambda x: x.get('format') == 'uuid')
WORKFLOW_ORM.node_mapping.add(LoadCustomFunc, lambda x: x.get('key') == 'custom_func')
WORKFLOW_ORM.node_mapping.add(StringType, lambda x: x.get('key') == 'custom_func')
WORKFLOW_ORM.node_mapping.add(StringType, lambda x: x.get('key') == 'status')
WORKFLOW_ORM.node_mapping.add(StringType, lambda x: x.get('key') == 'uri')
WORKFLOW_ORM.node_mapping.add(FilePath, lambda x: x.get('key') == 'project_dir')
WORKFLOW_ORM.node_mapping.add(FilePath, lambda x: x.get('key') == 'workdir')
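A sketch of how such an ORM is typically consumed: attach it to a graph holding the workflow data and the registered rules then resolve nodes on their `task_type`, `key` or `format` attributes. The top-level `Graph` import and the example nodes are assumptions for illustration only:

# Illustration only; the top-level Graph import is an assumption
from graphit import Graph

workflow = Graph()
workflow.orm = WORKFLOW_ORM

# Nodes with these attributes would be matched by the rules registered above
workflow.add_node('task', task_type='PythonTask')
workflow.add_node('status')        # matched by the StringType rule on 'key'
workflow.add_node('project_dir')   # matched by the FilePath rule on 'key'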
Example #9
def write_web(graph, orm_data_tag='haddock_type', indent=2, root_nid=None):
    """
    Export a graph in Spyder .web format

    Empty data blocks or Python None values are not exported.

    .. note:: Web graph export uses the Graph iternodes and iteredges methods
              to retrieve nodes and edges and 'get' the data labels. The
              behaviour of this process is determined by the single node/edge
              mixin classes and the ORM mapper.

    :param graph:          Graph object to export
    :type graph:           :graphit:Graph
    :param orm_data_tag:   data key to use for .web data identifier
    :type orm_data_tag:    :py:str
    :param indent:         .web file white space indentation level
    :type indent:          :py:int
    :param root_nid:       Root node ID in graph hierarchy

    :return:               Spyder .web graph representation
    :rtype:                :py:str
    """

    # Build ORM with format specific conversion classes
    weborm = GraphORM()
    weborm.node_mapping.add(
        RestraintsInterface,
        lambda x: x.get(graph.data.key_tag) == 'activereslist')
    weborm.node_mapping.add(
        RestraintsInterface,
        lambda x: x.get(graph.data.key_tag) == 'passivereslist')

    # Resolve the root node for hierarchical data structures: validate a
    # root_nid passed as argument, otherwise resolve it from the graph
    if root_nid is not None:
        if root_nid not in graph.nodes:
            raise GraphitException(
                'Root node ID {0} not in graph'.format(root_nid))
    else:
        root_nid = resolve_root_node(graph)
        if root_nid is None:
            raise GraphitException('Unable to resolve root node ID')

    # Set current NodeTools aside and register new one
    curr_nt = graph.node_tools
    graph.node_tools = WebNodeTools

    # Set current ORM aside and register new one.
    curr_orm = graph.orm
    graph.orm = weborm

    # Create empty file buffer
    string_buffer = StringIO()

    # Traverse node hierarchy
    def _walk_dict(node, indent_level):

        # First, collect all leaf nodes and write. Sort according to 'key'
        for leaf in sorted(
            [n for n in node.children(include_self=True) if n.isleaf],
                key=lambda obj: obj.key):

            # Do not export nodes that have no data or None but do export
            # empty data blocks (has orm_data_tag)
            if leaf.get(graph.data.value_tag, None) is None:
                if leaf.get(orm_data_tag):
                    string_buffer.write('{0}{1} = {2} (\n'.format(
                        ' ' * indent_level, leaf.get(graph.data.key_tag),
                        leaf.get(orm_data_tag)))
                    string_buffer.write('{0}),\n'.format(' ' * indent_level))
                continue

            # Format 'Array' types when they are list style leaf nodes
            if leaf.get('is_array', False) or leaf.get('type') == 'array':
                string_buffer.write('{0}{1} = {2} (\n'.format(
                    ' ' * indent_level, leaf.get(graph.data.key_tag),
                    leaf.get(orm_data_tag)))

                array_indent = indent_level + indent
                for array_type in leaf.get(graph.data.value_tag, default=[]):
                    string_buffer.write('{0}{1},\n'.format(
                        ' ' * array_indent, array_type))

                string_buffer.write('{0}),\n'.format(' ' * indent_level))

            # Format key, value pairs
            else:
                string_buffer.write('{0}{1} = {2},\n'.format(
                    ' ' * indent_level, leaf.get(graph.data.key_tag),
                    leaf.get(graph.data.value_tag, default='')))

        # Second, process child non-leaf nodes
        for child in [n for n in node.children() if not n.isleaf]:

            # Write block header
            key = ''
            if not child.get('is_array',
                             False) or child.get('type') == 'array':
                key = '{0} = '.format(child.get(graph.data.key_tag))
            string_buffer.write('{0}{1}{2} (\n'.format(
                ' ' * indent_level, key, child.get(orm_data_tag)))

            # Indent new data block one level down and walk children
            indent_level += indent
            _walk_dict(child, indent_level)

            # Close data block and indent one level up
            indent_level -= indent
            string_buffer.write('{0}),\n'.format(' ' * indent_level))

    # Build adjacency only once
    with graph.adjacency as adj:
        rootnode = graph.getnodes(root_nid)

    if rootnode.isleaf:
        _walk_dict(rootnode, 0)
    else:
        string_buffer.write('{0} (\n'.format(rootnode.get(orm_data_tag)))
        _walk_dict(rootnode, indent)
        string_buffer.write(')\n')

    # Restore original ORM and NodeTools
    graph.node_tools = curr_nt
    graph.orm = curr_orm

    logger.info('Graph {0} exported in WEB format'.format(repr(graph)))

    # Reset buffer cursor
    string_buffer.seek(0)
    return string_buffer.read()
Example #10
def read_web(web,
             graph=None,
             orm_data_tag='haddock_type',
             auto_parse_format=True):
    """
    Import hierarchical data structures defined in the Spyder .web format

    The data block type identifiers used in the .web format are stored in
    the nodes using the `orm_data_tag` attribute. These can be used by the
    Graph ORM mapper for custom data exchange in the graph.

    :param web:               Spider .web data format to import
    :type web:                file, string, stream or URL
    :param graph:             graph object to import the .web data in
    :type graph:              :graphit:Graph
    :param orm_data_tag:      data key to use for .web data identifier
    :type orm_data_tag:       :py:str
    :param auto_parse_format: automatically detect basic format types using JSON decoding
    :type auto_parse_format:  :py:bool

    :return:                  Graph object
    :rtype:                   :graphit:Graph
    """

    web_file = open_anything(web)
    if graph is None:
        graph = GraphAxis()
    elif not isinstance(graph, GraphAxis):
        raise GraphitException('Unsupported graph type {0}'.format(
            type(graph)))

    # Build .web parser ORM with format specific conversion classes
    weborm = GraphORM()
    weborm.node_mapping.add(
        RestraintsInterface,
        lambda x: x.get(graph.data.key_tag) == 'activereslist')
    weborm.node_mapping.add(
        RestraintsInterface,
        lambda x: x.get(graph.data.key_tag) == 'passivereslist')

    # Set current ORM aside and register parser ORM.
    curr_orm = graph.orm
    graph.orm = weborm

    curr_obj_nid = None
    object_open_tags = 0
    object_close_tags = 0
    array_key_counter = 1
    array_store = []
    for i, line in enumerate(web_file.readlines()):
        line = line.strip()
        if len(line):

            # Detect start of new object definition
            if line.endswith('('):

                # Process data
                meta_data = [n.strip() for n in line.strip('(').split('=', 1)]
                ddict = {orm_data_tag: meta_data[-1], 'is_array': False}
                if len(meta_data) > 1:
                    node_key = meta_data[0]
                else:
                    node_key = 'item{0}'.format(array_key_counter)
                    ddict['is_array'] = True
                    array_key_counter += 1

                # Clear the array store
                array_store = []

                # First object defines graph root
                if graph.empty():
                    curr_obj_nid = graph.add_node(node_key, **ddict)
                    graph.root = curr_obj_nid

                # Add new object as child of current object
                else:
                    child_obj_nid = graph.add_node(node_key, **ddict)
                    graph.add_edge(curr_obj_nid, child_obj_nid)
                    curr_obj_nid = child_obj_nid

                object_open_tags += 1

            # Detect end of object definition
            elif line.startswith(')'):

                # If there is data in the array store, add it to node
                if len(array_store):
                    array_node = graph.getnodes(curr_obj_nid)
                    array_node.is_array = True
                    array_node.set(graph.data.value_tag, array_store)

                # Reset array key counter
                array_key_counter = 1

                # Move one level up the object tree
                curr_obj_nid = node_parent(graph, curr_obj_nid,
                                           graph.root) or graph.root
                object_close_tags += 1

            # Parse object parameters
            else:

                # Parse key,value pairs and add as leaf node
                params = [n.strip() for n in line.rstrip(',').split('=', 1)]

                if '=' in line and len(params) == 2:
                    leaf_nid = graph.add_node(params[0])
                    graph.add_edge(curr_obj_nid, leaf_nid)

                    value = params[1]
                    if auto_parse_format:
                        value = json_decode_params(params[1])

                    leaf_node = graph.getnodes(leaf_nid)
                    leaf_node.set(graph.data.value_tag, value)

                # Parse single values as array data
                elif len(params) == 1:

                    value = params[0]
                    if auto_parse_format:
                        value = json_decode_params(params[0])

                    # Store array items as nodes
                    array_store.append(value)

                else:
                    logger.warning(
                        'Unknown .web data formatting on line: {0}, {1}'.
                        format(i, line))

    web_file.close()

    # Object blocks opening '(' and closing ')' tag count should be balanced
    if object_open_tags != object_close_tags:
        raise AssertionError(
            'Unbalanced object block, something is wrong with the file format')

    # Restore original ORM
    graph.orm = curr_orm

    # Root is of type 'array', rename key from 'item1' to 'project'
    root = graph.getnodes(graph.root)
    root.key = 'project'

    return graph
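A roundtrip sketch combining the reader above with the `write_web` exporter from the previous example, assuming both are importable from the same module and using a placeholder file name:

# 'haddockparam.web' is a placeholder for an existing HADDOCK parameter file
project = read_web('haddockparam.web')

# The graph root is renamed to 'project' at the end of read_web
root = project.getnodes(project.root)
print(root.key)

# Export the hierarchy back to .web formatted text
print(write_web(project, orm_data_tag='haddock_type', indent=2))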
Example #11
def write_pydata(graph, nested=True, sep='.', default=None, allow_none=True, export_all=False, include_root=False):
    """
    Export a graph to a (nested) dictionary
    
    Convert graph representation of the dictionary tree into a dictionary
    using a nested or flattened representation of the dictionary hierarchy.
    
    In a flattened representation, the keys are concatenated using the `sep`
    separator.
    Dictionary keys and values are obtained from the node attributes using
    `key_tag` and `value_tag`. The key_tag defaults to the graph key_tag.
    
    Exporting only the primary key_tag/value_tag pairs is the default
    behaviour. Use the 'export_all' argument to export the full node
    dictionary.
    
    TODO: include ability to export multiple isolated subgraphs
    
    :param graph:          Graph object to export
    :type graph:           :graphit:GraphAxis
    :param nested:         return a nested or flattened dictionary
    :type nested:          :py:bool
    :param sep:            key separator used in flattening the dictionary
    :type sep:             :py:str
    :param default:        value to use when node value was not found using
                           value_tag.
    :type default:         mixed
    :param allow_none:     allow None values in the output
    :type allow_none:      :py:bool
    :param export_all:     Export the full node storage dictionary.
    :type export_all:      :py:bool
    :param include_root:   Include the root node in the hierarchy
    :type include_root:    :py:bool
    
    :rtype:                :py:dict
    """
    
    # No nodes, return empty dict
    if graph.empty():
        logging.info('Graph is empty: {0}'.format(repr(graph)))
        return {}
    
    # Graph should be of type GraphAxis with a root node nid defined
    if not isinstance(graph, GraphAxis):
        raise TypeError('Unsupported graph type {0}'.format(type(graph)))
    if graph.root is None:
        raise GraphitException('No graph root node defined')
    
    # Build ORM with format specific conversion classes
    pydataorm = GraphORM(inherit=False)
    pydataorm.node_mapping.add(ParseDictionaryType, lambda x: x.get('format') == 'dict')
    pydataorm.node_mapping.add(ParseListType, lambda x: x.get('format') == 'list')
    pydataorm.node_mapping.add(ParseSetType, lambda x: x.get('format') == 'set')
    pydataorm.node_mapping.add(ParseTupleType, lambda x: x.get('format') == 'tuple')
    
    # Set current ORM aside and register new one.
    curr_orm = graph.orm
    graph.orm = pydataorm
    
    # Set current NodeTools aside and register new one
    curr_nt = graph.node_tools
    graph.node_tools = PyDataNodeTools
    
    # Define start node for recursive export
    if len(graph) > 1:
        root = graph.getnodes(resolve_root_node(graph))
    else:
        root = graph.getnodes(list(graph.nodes.keys()))
    
    # If we export the full node dictionary, also export None key/value pairs
    if export_all:
        allow_none = True
    
    # Start recursive parsing
    root_key, data = root.serialize(allow_none=allow_none, export_all=export_all, default=default)
    
    # Include root_key or not
    if include_root and root_key:
        data = {root_key: data}
    
    # Flatten the dictionary if needed
    if not nested:
        data = flatten_nested_dict(data, sep=sep)
    
    # Restore original ORM and NodeTools
    graph.node_tools = curr_nt
    graph.orm = curr_orm
    
    return data
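Compared to example #5, the main addition is the flattened output mode. A small sketch of what `nested=False` is intended to produce; the input data and import path are assumptions:

# Assumed import path; adjust to the module that defines this write_pydata
from graphit.graph_io.io_pydata_format import read_pydata, write_pydata

graph = read_pydata({'run': {'stage': {'it0': 1000, 'it1': 200}}})

# Flattened export concatenates nested keys with `sep`, roughly:
# {'run.stage.it0': 1000, 'run.stage.it1': 200}
flat = write_pydata(graph, nested=False, sep='.')
print(flat)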
Example #12
        else:
            self.nodes[self.nid][key] = file_obj


class ArrayType(NodeAxisTools):
    def set(self, key, value=None):

        data = [d.strip(',') for d in value]
        formatted = []

        for item in data:
            # Cast to int first, then float; any remaining value stays a string
            try:
                item = int(item)
            except ValueError:
                try:
                    item = float(item)
                except ValueError:
                    item = str(item)

            formatted.append(item)

        self.nodes[self.nid][key] = formatted


CLIORM = GraphORM()
CLIORM.node_mapping.add(IntegerType, lambda x: x.get('type') == 'integer')
CLIORM.node_mapping.add(FloatType, lambda x: x.get('type') == 'number')
CLIORM.node_mapping.add(BooleanType, lambda x: x.get('type') == 'boolean')
CLIORM.node_mapping.add(ArrayType, lambda x: x.get('type') == 'array')
CLIORM.node_mapping.add(FileType, lambda x: x.get('format') == 'file')
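The coercion done by `ArrayType.set` above can be shown in isolation; this plain-Python sketch mirrors that logic without the graph classes:

def coerce_array(tokens):
    """Strip trailing commas and cast each token to int, float or str."""
    formatted = []
    for item in (t.strip(',') for t in tokens):
        for cast in (int, float, str):
            try:
                formatted.append(cast(item))
                break
            except ValueError:
                continue
    return formatted


print(coerce_array(['1,', '2.5,', 'apo']))   # [1, 2.5, 'apo']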