Example #1
def check_dimension(typeattr, unit_id=None):
    """
        Check that the unit and dimension on a type attribute match.
        Alternatively, pass in a unit manually to check against the dimension
        of the type attribute
    """
    if unit_id is None:
        unit_id = typeattr.unit_id

    dimension_id = get_attr(typeattr.attr_id).dimension_id

    if unit_id is not None and dimension_id is None:
        # Error: a unit is set but the attribute has no dimension to check it against
        unit_dimension_id = units.get_dimension_by_unit_id(unit_id).id
        unit = units.get_unit(unit_id)
        dimension = units.get_dimension(unit_dimension_id, do_accept_dimension_id_none=True)
        raise HydraError(f"Unit {unit_id} ({unit.abbreviation}) has dimension_id"
                         f" {dimension.id} (name={dimension.name}),"
                         " but attribute has no dimension")
    elif unit_id is not None and dimension_id is not None:
        unit_dimension_id = units.get_dimension_by_unit_id(unit_id).id
        unit = units.get_unit(unit_id)
        unit_dimension = units.get_dimension(unit_dimension_id, do_accept_dimension_id_none=True)
        attr_dimension = units.get_dimension(dimension_id, do_accept_dimension_id_none=True)
        if unit_dimension_id != dimension_id:
            # Error: the unit's dimension does not match the attribute's dimension
            raise HydraError(f"Unit {unit_id} ({unit.abbreviation}) has dimension_id"
                             f" {unit_dimension.id} (name={unit_dimension.name}),"
                             f" but attribute has id: {attr_dimension.id} ({attr_dimension.name})")
Example #2
def remove_template_from_network(network_id, template_id, remove_attrs, **kwargs):
    """
        Remove all resource types in a network relating to the specified
        template.
        remove_attrs ('Y' or 'N')
            Flag to indicate whether the attributes associated with the template
            types should be removed from the resources in the network. These will
            only be removed if they are not shared with another template on the network
    """

    try:
        network = db.DBSession.query(Network).filter(Network.id == network_id).one()
    except NoResultFound:
        raise HydraError("Network %s not found"%network_id)

    try:
        template = db.DBSession.query(Template).filter(Template.id == template_id).one()
    except NoResultFound:
        raise HydraError("Template %s not found"%template_id)

    type_ids = [tmpltype.id for tmpltype in template.get_types()]

    node_ids = [n.id for n in network.nodes]
    link_ids = [l.id for l in network.links]
    group_ids = [g.id for g in network.resourcegroups]

    if remove_attrs == 'Y':
        #find the attributes to remove
        resource_attrs_to_remove = _get_resources_to_remove(network, template)
        for n in network.nodes:
            resource_attrs_to_remove.extend(_get_resources_to_remove(n, template))
        for l in network.links:
            resource_attrs_to_remove.extend(_get_resources_to_remove(l, template))
        for g in network.resourcegroups:
            resource_attrs_to_remove.extend(_get_resources_to_remove(g, template))

        for ra in resource_attrs_to_remove:
            db.DBSession.delete(ra)

    resource_types = db.DBSession.query(ResourceType).filter(
        and_(or_(
            ResourceType.network_id==network_id,
            ResourceType.node_id.in_(node_ids),
            ResourceType.link_id.in_(link_ids),
            ResourceType.group_id.in_(group_ids),
        ), ResourceType.type_id.in_(type_ids))).all()

    for resource_type in resource_types:
        db.DBSession.delete(resource_type)

    db.DBSession.flush()
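A minimal usage sketch, assuming a configured hydra-base DB session and an existing network and template (the IDs below are hypothetical):

# Remove the template's types from the network and drop any attributes it
# contributed that are not shared with another template on the network.
remove_template_from_network(network_id=1, template_id=2, remove_attrs='Y')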
Example #3
def _parse_data_restriction(restriction_dict):
    if restriction_dict is None or len(restriction_dict) == 0:
        return None

    #replace soap text with an empty string
    #'{soap_server.hydra_complexmodels}' -> ''
    dict_str = re.sub('{[a-zA-Z._]*}', '', str(restriction_dict))

    if isinstance(restriction_dict, dict):
        new_dict = restriction_dict
    else:
        try:
            new_dict = json.loads(restriction_dict)
        except (ValueError, TypeError):
            raise HydraError(
                f"Unable to parse the JSON in the restriction data: {restriction_dict}"
            )

    #Evaluate whether the dict actually contains anything.
    if not isinstance(new_dict, dict) or len(new_dict) == 0:
        log.critical('A restriction was specified, but it is null')
        return None

    ret_dict = {}
    for k, v in new_dict.items():
        if (isinstance(v, str) or isinstance(v, list)) and len(v) == 1:
            ret_dict[k] = v[0]
        else:
            ret_dict[k] = v

    return json.dumps(ret_dict)
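For a well-formed restriction, the main transformation is that single-element lists are unwrapped before the dict is re-serialised. A standalone sketch of that behaviour using only the standard library (the restriction keys are illustrative):

import json

restriction = {'NUMPLACES': [1], 'VALUERANGE': [0, 100]}
unwrapped = {k: (v[0] if isinstance(v, (str, list)) and len(v) == 1 else v)
             for k, v in restriction.items()}
print(json.dumps(unwrapped))  # {"NUMPLACES": 1, "VALUERANGE": [0, 100]}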
Example #4
    def _create_dataframe(cls, value):
        """
            Builds a dataframe from the value
        """
        try:

            ordered_jo = json.loads(six.text_type(value), object_pairs_hook=collections.OrderedDict)

            #Pandas does not maintain the order of dicts, so we must break the dict
            #up and put it into the dataframe manually to maintain the order.

            cols = list(ordered_jo.keys())

            if len(cols) == 0:
                raise ValueError("Dataframe has no columns")

            #Assume all sub-dicts have the same set of keys
            if isinstance(ordered_jo[cols[0]], list):
                index = range(len(ordered_jo[cols[0]]))
            else:
                #cater for when the indices are not the same by identifying
                #all the indices, and then making a set of them.
                longest_index = []
                for col in ordered_jo.keys():
                    index = list(ordered_jo[col].keys())
                    if len(index) > len(longest_index):
                        longest_index = index
                index = longest_index

            df = pd.read_json(value, convert_axes=False)

            #Make both indices the same type, so they can be compared
            df.index = df.index.astype(str)
            new_index = pd.Index(index).astype(str)

            #Now reindex the dataframe so that the index is in the correct order,
            #as per the data in the DB, and not with the default pandas ordering.
            new_df = df.reindex(new_index)

            #If the reindex didn't work, don't use that value
            if new_df.isnull().sum().sum() != len(df.index):
                df = new_df


        except ValueError as e:
            """ Raised on scalar types used as pd.DataFrame values
                in absence of index arg
            """
            log.exception(e)
            raise HydraError(str(e))

        except AssertionError as e:
            log.warning("An error occurred creating the new data frame: %s. Defaulting to a simple read_json"%(e))
            df = pd.read_json(value).fillna(0)

        return df
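The object_pairs_hook is what preserves the column and index order of the incoming JSON, which, as the comments note, pandas alone does not guarantee. A standalone illustration of that step, with no hydra-base objects involved:

import collections
import json

value = '{"flow": {"t2": 1, "t0": 2, "t1": 3}}'
ordered = json.loads(value, object_pairs_hook=collections.OrderedDict)
print(list(ordered['flow'].keys()))  # ['t2', 't0', 't1'] -- the JSON order is kept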
Example #5
    def _create_dataframe(cls, value):
        """
            Builds a dataframe from the value
        """
        try:

            ordered_jo = json.loads(six.text_type(value), object_pairs_hook=collections.OrderedDict)

            #Pandas does not maintain the order of dicts, so we must break the dict
            #up and put it into the dataframe manually to maintain the order.

            cols = list(ordered_jo.keys())

            if len(cols) == 0:
                raise ValueError("Dataframe has no columns")

            #Assume all sub-dicts have the same set of keys
            if isinstance(ordered_jo[cols[0]], list):
                index = range(len(ordered_jo[cols[0]]))
            else:
                index = list(ordered_jo[cols[0]].keys())
            data = []
            for c in cols:
                if isinstance(ordered_jo[c], list):
                    data.append(ordered_jo[c])
                else:
                    data.append(list(ordered_jo[c].values()))

            # This goes in 'sideways' (cols=index, index=cols), so it needs to be transposed after to keep
            # the correct structure
            # We also try to coerce the data to a regular numpy array first. If the shape is correct
            # this is a much faster way of creating the DataFrame instance.
            try:
                np_data = np.array(data)
            except ValueError:
                np_data = None

            if np_data is not None and np_data.shape == (len(cols), len(index)):
                df = pd.DataFrame(np_data, columns=index, index=cols).transpose()
            else:
                # TODO should these heterogeneous structures be supported?
                # See https://github.com/hydraplatform/hydra-base/issues/72
                df = pd.DataFrame(data, columns=index, index=cols).transpose()


        except ValueError as e:
            """ Raised on scalar types used as pd.DataFrame values
                in absence of index arg
            """
            raise HydraError(str(e))

        except AssertionError as e:
            log.warning("An error occurred creating the new data frame: %s. Defaulting to a simple read_json"%(e))
            df = pd.read_json(value).fillna(0)

        return df
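The 'sideways' construction described in the comments can be reproduced in isolation: the data goes in with one row per column and is then transposed. A standalone sketch with illustrative column and index labels:

import numpy as np
import pandas as pd

cols = ['flow', 'level']        # column names taken from the JSON keys
index = ['t0', 't1', 't2']      # index labels taken from the sub-dict keys
data = [[1, 2, 3], [4, 5, 6]]   # one inner list per column, i.e. 'sideways'

df = pd.DataFrame(np.array(data), columns=index, index=cols).transpose()
print(df.index.tolist())    # ['t0', 't1', 't2']
print(df.columns.tolist())  # ['flow', 'level']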
Example #6
    def _get_template_type_by_name(self, name, resource_type=None):
        for template_type in self.template['templatetypes']:
            if name == template_type['name']:
                if resource_type is None or template_type[
                        'resource_type'] == resource_type:
                    return template_type
        msg = 'Template does not contain node of type "{}".'.format(name)
        if self.ignore_type_errors:
            log.warning(msg)
            return {}
        else:
            raise HydraError(msg)
Example #7
def create_mysql_db(db_url):
    """
        To simplify deployment, create the mysql DB if it's not there.
        Accepts a URL with or without a DB name stated, and returns a db url
        containing the db name for use in the main sqlalchemy engine.

        The URL can take one of the following forms:

        mysql+driver://username:password@hostname
        mysql+driver://username:password@hostname/dbname

        if no DB name is specified, it is retrieved from config
    """

    #add a special case for a memory-based sqlite session
    if db_url == 'sqlite://':
        return db_url

    #Remove surrounding whitespace and forward slashes
    db_url = db_url.strip().strip('/')

    #Check this is a mysql URL
    if db_url.find('mysql') >= 0:

        #Get the DB name from config and check if it's in the URL
        db_name = config.get('mysqld', 'db_name', 'hydradb')
        if db_url.find(db_name) >= 0:
            no_db_url = db_url.rsplit("/", 1)[0]
        else:
            #Check that there is a hostname specified, as we'll be using the '@' symbol soon..
            if db_url.find('@') == -1:
                raise HydraError("No Hostname specified in DB url")

            #Check if there's a DB name specified that's different to the one in config.
            host_and_db_name = db_url.split('@')[1]
            if host_and_db_name.find('/') >= 0:
                no_db_url, db_name = db_url.rsplit("/", 1)
            else:
                no_db_url = db_url
                db_url = no_db_url + "/" + db_name

        db_url = "{}?charset=utf8&use_unicode=1".format(db_url)

        if config.get('mysqld', 'auto_create', 'Y') == 'Y':
            tmp_engine = create_engine(no_db_url)
            log.debug(
                "Creating database {0} if it does not already exist.".format(db_name))
            tmp_engine.execute(
                "CREATE DATABASE IF NOT EXISTS {0}".format(db_name))

    return db_url
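The database-name handling is plain string manipulation on the URL. A standalone sketch of the split used above (the URL is hypothetical and no engine is created):

db_url = 'mysql+mysqldb://user:password@localhost/hydradb'
no_db_url, db_name = db_url.rsplit('/', 1)
print(no_db_url)  # mysql+mysqldb://user:password@localhost
print(db_name)    # hydradb
print("{}?charset=utf8&use_unicode=1".format(db_url))  # form handed to SQLAlchemy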
Example #8
    def set_value(self, val):
        self._value = val
        try:
            # Use the validate test to confirm it is a pd.DataFrame...
            self.validate()
        except AssertionError:
            # ...otherwise attempt to build one from json...
            try:
                df = self.__class__._create_dataframe(val)
                self._value = df
                self.validate()
            except Exception as e:
                # ...and fail if neither works
                raise HydraError(str(e))
Example #9
def get_template_by_name(name, **kwargs):
    """
        Get a specific resource template, by name.
    """
    try:
        tmpl_i = db.DBSession.query(Template).filter(
            Template.name == name).one()

        tmpl_j = JSONObject(tmpl_i)

        tmpl_j.templatetypes = tmpl_i.get_types()

        return tmpl_j
    except NoResultFound:
        log.info("%s is not a valid identifier for a template", name)
        raise HydraError('Template "%s" not found' % name)
Example #10
def get_templatetype_by_name(template_id, type_name, **kwargs):
    """
        Get a specific resource type by name.
    """

    try:
        templatetype = db.DBSession.query(TemplateType).filter(
            TemplateType.template_id == template_id,
            TemplateType.name == type_name).one()
    except NoResultFound:
        raise HydraError("%s is not a valid identifier for a type" %
                         (type_name))

    inherited_templatetype = get_templatetype(templatetype.id, **kwargs)

    return inherited_templatetype
Example #11
def get_template(template_id, **kwargs):
    """
        Get a specific resource template, by ID.
    """
    try:
        tmpl_i = db.DBSession.query(Template).filter(
            Template.id == template_id).one()

        tmpl_j = JSONObject(tmpl_i)

        tmpl_j.templatetypes = tmpl_i.get_types()

        #ignore the messing around we've been doing to the ORM objects
        #db.DBSession.expunge(tmpl_i)

        return tmpl_j
    except NoResultFound:
        raise HydraError("Template %s not found" % template_id)
Example #12
def delete_templatetype(type_id,
                        template_i=None,
                        delete_resourcetypes=False,
                        flush=True,
                        delete_children=False,
                        **kwargs):
    """
        Delete a template type and its typeattrs.
    """

    try:
        tmpltype_i = db.DBSession.query(TemplateType)\
                .filter(TemplateType.id == type_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Template Type %s not found" % (type_id, ))

    if template_i is None:
        template_i = db.DBSession.query(Template).filter(
            Template.id == tmpltype_i.template_id).one()

    if len(tmpltype_i.get_children()) > 0 and delete_children is False:
        raise HydraError(
            f"Unable to delete type. Template type {tmpltype_i.name} (ID: {type_id}) has"
            f" children. If you want to delete this, use the 'delete_children' flag."
        )

    if delete_children is True:
        tmpltype_i.delete_children(delete_resourcetypes=delete_resourcetypes)

    tmpltype_i.check_can_delete_resourcetypes(
        delete_resourcetypes=delete_resourcetypes)

    if delete_resourcetypes is True:
        tmpltype_i.delete_resourcetypes()

    #first remove the typeattrs associated with the type
    for ta_i in tmpltype_i.typeattrs:
        db.DBSession.delete(ta_i)

    db.DBSession.delete(tmpltype_i)

    if flush:
        db.DBSession.flush()
Example #13
def assign_type_to_resource(type_id, resource_type, resource_id, **kwargs):
    """Assign new type to a resource. This function checks if the necessary
    attributes are present and adds them if needed. Non existing attributes
    are also added when the type is already assigned. This means that this
    function can also be used to update resources, when a resource type has
    changed.
    """

    if resource_type == 'NETWORK':
        resource = db.DBSession.query(Network).filter(Network.id == resource_id).one()
    elif resource_type == 'NODE':
        resource = db.DBSession.query(Node).filter(Node.id == resource_id).one()
    elif resource_type == 'LINK':
        resource = db.DBSession.query(Link).filter(Link.id == resource_id).one()
    elif resource_type == 'GROUP':
        resource = db.DBSession.query(ResourceGroup).filter(ResourceGroup.id == resource_id).one()
    else:
        raise HydraError("Unrecognised resource type: %s" % resource_type)

    res_attrs, res_type, res_scenarios = set_resource_type(resource, type_id, **kwargs)

    type_i = _get_type(type_id)

    if resource_type != type_i.resource_type:
        raise HydraError("Cannot assign a %s type to a %s"%
                         (type_i.resource_type, resource_type))

    if res_type is not None:
        db.DBSession.bulk_insert_mappings(ResourceType, [res_type])

    if len(res_attrs) > 0:
        db.DBSession.bulk_insert_mappings(ResourceAttr, res_attrs)

    if len(res_scenarios) > 0:
        db.DBSession.bulk_insert_mappings(ResourceScenario, res_scenarios)

    #Make DBsession 'dirty' to pick up the inserts by doing a no-op delete.
    db.DBSession.query(Attr).filter(Attr.id.is_(None)).delete()

    db.DBSession.flush()

    return_type = _get_type(type_id)

    return return_type
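A minimal usage sketch, assuming a configured hydra-base DB session and an existing node and template type (the IDs below are hypothetical):

# Apply template type 42 to node 7; missing resource attributes are created
# and the refreshed type is returned.
new_type = assign_type_to_resource(type_id=42, resource_type='NODE', resource_id=7)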
Example #14
    def attributes_from_nodes(self):
        """ Generator to convert Pywr nodes data in to Hydra attribute data.

        This function is intended to be used to convert Pywr components (e.g. recorders, parameters, etc.)  data
        in to a format that can be imported in to Hydra. The Pywr component data is a dict of dict with each
        sub-dict represent a single component (see the "recorder" or "parameters" section of the Pywr JSON format). This
        function returns Hydra data to add a Attribute for each of the components in the outer dict.
        """
        nodes = self.data['nodes']

        attributes = set()

        for node in nodes:
            node_type = node['type'].lower()
            node_klass = NodeMeta.node_registry.get(node_type)
            if node_klass is None:
                msg = f"Node type {node_type} not recognised."
                if self.ignore_type_errors is False:
                    raise HydraError(msg)
                log.warning(msg + " Attempting to add default values.")
                for name in node.keys():
                    if name not in PYWR_PROTECTED_NODE_KEYS:
                        attributes.add(name)
                continue
            schema = node_klass.Schema()

            # Create an attribute for each field in the schema.
            for name, field in schema.fields.items():
                if name in PYWR_PROTECTED_NODE_KEYS:
                    continue
                attributes.add(name)

        for attr in sorted(attributes):
            yield self.attr_name_map.get(
                attr, {
                    'name': attr,
                    'description': '',
                    'dimension_id': self.attr_dimension_map.get(attr)
                })
Example #15
def import_template_json(template_json_string, allow_update=True, **kwargs):
    """
        Add the template, type and typeattrs described in a JSON file.

        Delete type, typeattr entries in the DB that are not in the JSON file.
        The assumption is that they have been deleted and are no longer required.

        The allow_update indicates whether an existing template of the same name should
        be updated, or whether it should throw an 'existing name' error.
    """

    user_id = kwargs.get('user_id')

    try:
        template_dict = json.loads(template_json_string)
    except (ValueError, TypeError):
        raise HydraError(
            "Unable to parse JSON string. Please ensure it is valid JSON.")

    return import_template_dict(template_dict,
                                allow_update=allow_update,
                                user_id=user_id)
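A minimal sketch of the expected payload, based on the structure that import_template_dict (shown later) checks for; the attribute and template contents are illustrative and a configured hydra-base session is assumed before actually importing:

import json

template_json = json.dumps({
    "attributes": {"1": {"name": "flow", "dimension": "dimensionless"}},
    "datasets": {},
    "template": {
        "name": "Example template",
        "description": "Illustrative only",
        "templatetypes": []
    }
})
# With a configured session:
# import_template_json(template_json, allow_update=True, user_id=1)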
Example #16
def validate_resourcescenario(resourcescenario, template_id=None, **kwargs):
    """
        Perform a check to ensure a resource scenario's datasets are correct given what the
        definition of that resource (its type) specifies.
    """
    res = resourcescenario.resourceattr.get_resource()

    types = res.types

    dataset = resourcescenario.dataset

    if len(types) == 0:
        return

    if template_id is not None:
        if template_id not in [r.templatetype.template_id for r in res.types]:
            raise HydraError("Template %s is not used for resource attribute %s in scenario %s"%\
                             (template_id, resourcescenario.resourceattr.attr.name,
                             resourcescenario.scenario.name))

    #Validate against all the types for the resource
    for resourcetype in types:
        #If a specific type has been specified, then only validate
        #against that type and ignore all the others
        if template_id is not None:
            if resourcetype.templatetype.template_id != template_id:
                continue
        #Identify the template types for the template
        tmpltype = resourcetype.templatetype
        for ta in tmpltype.typeattrs:
            #If we find a template type which matches the current attribute,
            #we can do some validation.
            if ta.attr_id == resourcescenario.resourceattr.attr_id:
                if ta.data_restriction:
                    log.debug("Validating against %s", ta.data_restriction)
                    validation_dict = json.loads(ta.data_restriction)
                    dataset_util.validate_value(validation_dict, dataset.get_val())
Example #17
def set_resource_type(resource, type_id, types={}, **kwargs):
    """
        Set this resource to be a certain type.
        Type objects (a dictionary keyed on type_id) may be
        passed in to save on loading.
        This function does not call save. It must be done afterwards.
        New resource attributes are added to the resource if the template
        requires them. Resource attributes on the resource but not used by
        the template are not removed.
        @returns a list of new resource attributes, a new resource type object
        and a dict of new resource scenarios (keyed on attr_id)
    """

    #get the resource's network ID:
    if kwargs.get('network_id') is not None:
        network_id = kwargs['network_id']
    elif isinstance(resource, Network):
        network_id = resource.id
    elif resource.network_id:
        network_id = resource.network_id
    elif resource.network:
        network_id = resource.network.id
    else:
        network_id = None

    child_template_id = kwargs.get('child_template_id')
    if kwargs.get('child_template_id') is None:
        if network_id is not None:
            child_template_id = get_network_template(network_id, type_id)

    ref_key = resource.ref_key

    existing_attr_ids = []
    for res_attr in resource.attributes:
        existing_attr_ids.append(res_attr.attr_id)

    if type_id in types:
        type_i = types[type_id]
    else:
        type_i = _get_type(type_id)

    type_attrs = dict()
    for typeattr in type_i.typeattrs:
        type_attrs[typeattr.attr_id] = {
            'is_var':typeattr.attr_is_var,
            'default_dataset_id': typeattr.default_dataset.id if typeattr.default_dataset else None
        }

    # check if attributes exist
    missing_attr_ids = set(type_attrs.keys()) - set(existing_attr_ids)

    # add attributes if necessary
    new_res_attrs = []

    #This is a dict as the length of the list may not match the new_res_attrs
    #Keyed on attr_id, as resource_attr_id doesn't exist yet, and there should only
    #be one attr_id per template.
    new_res_scenarios = {}
    for attr_id in missing_attr_ids:
        ra_dict = dict(
            ref_key=ref_key,
            attr_id=attr_id,
            attr_is_var=type_attrs[attr_id]['is_var'],
            node_id=resource.id if ref_key == 'NODE' else None,
            link_id=resource.id if ref_key == 'LINK' else None,
            group_id=resource.id if ref_key == 'GROUP' else None,
            network_id=resource.id if ref_key == 'NETWORK' else None,
        )
        new_res_attrs.append(ra_dict)



        if type_attrs[attr_id]['default_dataset_id'] is not None:
            if hasattr(resource, 'network'):
                for s in resource.network.scenarios:

                    if new_res_scenarios.get(attr_id) is None:
                        new_res_scenarios[attr_id] = {}

                    new_res_scenarios[attr_id][s.id] = dict(
                        dataset_id=type_attrs[attr_id]['default_dataset_id'],
                        scenario_id=s.id,
                        #Not stored in the DB, but needed to connect the RA ID later.
                        attr_id=attr_id,
                        ref_key=ref_key,
                        node_id=ra_dict['node_id'],
                        link_id=ra_dict['link_id'],
                        group_id=ra_dict['group_id'],
                        network_id=ra_dict['network_id'],
                    )


    resource_type = None
    for rt in resource.types:

        if rt.type_id == type_i.id:
            break

        errors = check_type_compatibility(rt.type_id, type_i.id, **kwargs)
        if len(errors) > 0:
            raise HydraError("Cannot apply type %s to resource %s as it "
                             "conflicts with type %s. Errors are: %s"
                             %(type_i.name, resource.get_name(),
                               rt.get_templatetype().name, ','.join(errors)))
    else:
        # add type to tResourceType if it doesn't exist already
        resource_type = dict(
            node_id=resource.id if ref_key == 'NODE' else None,
            link_id=resource.id if ref_key == 'LINK' else None,
            group_id=resource.id if ref_key == 'GROUP' else None,
            network_id=resource.id if ref_key == 'NETWORK' else None,
            ref_key=ref_key,
            type_id=type_id,
            child_template_id=child_template_id
        )

    return new_res_attrs, resource_type, new_res_scenarios
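The function returns plain dicts rather than ORM objects, which is what allows the caller (see assign_type_to_resource above) to bulk-insert them. A sketch of the returned shapes with illustrative values:

new_res_attrs = [{
    'ref_key': 'NODE', 'attr_id': 12, 'attr_is_var': 'N',
    'node_id': 7, 'link_id': None, 'group_id': None, 'network_id': None,
}]
resource_type = {
    'ref_key': 'NODE', 'type_id': 42, 'child_template_id': None,
    'node_id': 7, 'link_id': None, 'group_id': None, 'network_id': None,
}
# new_res_scenarios is keyed on attr_id, then on scenario id
new_res_scenarios = {12: {1: {'dataset_id': 99, 'scenario_id': 1, 'attr_id': 12,
                              'ref_key': 'NODE', 'node_id': 7, 'link_id': None,
                              'group_id': None, 'network_id': None}}}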
Example #18
def _set_typeattr(typeattr, existing_ta=None, check_dimensions=True):
    """
        Add or update a type attribute.
        If an existing type attribute is provided, then update.

        Checks are performed to ensure that the dimension provided on the
        type attr (not updateable) is the same as that on the referring attribute.
        The unit provided (stored on tattr) must conform to the dimension stored
        on the referring attribute (stored on tattr).

        This is done so that multiple templates can all use the same attribute,
        but specify different units.

        If no attr_id is provided, but an attr_name and dimension are provided,
        then a new attribute can be created (or retrieved) and used. I.e. an
        attribute ID does not need to be specified if attr_name and dimension are specified.

        ***WARNING***
        Setting ID to null means a new type attribute (and even a new attr)
        may be added; none are removed or replaced. To remove other type attrs, do it
        manually using delete_typeattr.
    """
    if existing_ta is None:

        #check for an existing TA
        check_existing_ta = None
        if typeattr.attr_id and typeattr.type_id:
            check_existing_ta = db.DBSession.query(TypeAttr)\
                .filter(TypeAttr.attr_id == typeattr.attr_id, TypeAttr.type_id == typeattr.type_id).first()

        #There's already a TA with this attr_id in this type
        if check_existing_ta is not None:
            ta = check_existing_ta
        else:
            ta = TypeAttr(attr_id=typeattr.attr_id)
            ## default new type attrs to 'active'.
            ##This has replaced the database default because for child typeattrs,
            ##we need the status to be NULL so it can inherit from its parent
            ta.status = 'A'
    else:
        if typeattr.id is not None:
            ta = db.DBSession.query(TypeAttr).filter(
                TypeAttr.id == typeattr.id).one()
        else:
            ta = existing_ta

    ta.attr_id = typeattr.attr_id
    ta.unit_id = typeattr.unit_id
    ta.type_id = typeattr.type_id
    ta.data_type = typeattr.data_type
    ta.status = typeattr.status if typeattr.status is not None else 'A'

    if hasattr(
            typeattr,
            'default_dataset_id') and typeattr.default_dataset_id is not None:
        ta.default_dataset_id = typeattr.default_dataset_id

    ta.description = typeattr.description

    ta.properties = typeattr.get_properties()

    #support legacy use of 'is_var' instead of 'attr_is_var'
    if hasattr(typeattr, 'is_var') and typeattr.is_var is not None:
        typeattr.attr_is_var = typeattr.is_var

    ta.attr_is_var = typeattr.attr_is_var if typeattr.attr_is_var is not None else 'N'

    ta.data_restriction = _parse_data_restriction(typeattr.data_restriction)

    if typeattr.unit_id is None or typeattr.unit_id == '':
        #No unit is specified, so there is nothing to check against the dimension
        ta.unit_id = None
    else:
        unit = units.get_unit(typeattr.unit_id)
        dimension = units.get_dimension(unit.dimension_id)
        if typeattr.attr_id is not None and typeattr.attr_id > 0 and check_dimensions:
            # Getting the passed attribute, so we need to check consistency
            # between attr dimension id and typeattr dimension id
            attr = db.DBSession.query(Attr).filter(
                Attr.id == ta.attr_id).first()
            if attr is not None and (attr.dimension_id is None or
                                     attr.dimension_id != dimension.id):

                attr_dimension = units.get_dimension(attr.dimension_id)
                # In this case there is an inconsistency between
                # attr.dimension_id and typeattr.unit_id
                raise HydraError(
                    "Unit mismatch between type and attribute. "
                    f"Type attribute for {attr.name} specifies "
                    f"unit {unit.name}, dimension {dimension.name}. "
                    f"The attribute specifies a dimension of {attr_dimension.name}. "
                    "Cannot set a unit on a type attribute which "
                    "does not match its attribute.")
        elif typeattr.attr_id is None and typeattr.name is not None:
            # Getting/creating the attribute by typeattr dimension id and typeattr name
            # In this case it does not matter whether the dimension_id is null or not
            attr = get_attr_by_name_and_dimension(typeattr.name, dimension.id)

            ta.attr_id = attr.id
            ta.attr = attr

    if check_dimensions:
        check_dimension(ta)

    if existing_ta is None:
        log.debug("Adding ta to DB")
        db.DBSession.add(ta)

    if not hasattr(ta, 'attr'):
        attr = db.DBSession.query(Attr).filter(Attr.id == ta.attr_id).one()
        ta.attr = attr

    return ta
Example #19
def parse_json_typeattr(type_i,
                        typeattr_j,
                        attribute_j,
                        default_dataset_j,
                        user_id=None):
    dimension_i = None
    if attribute_j.dimension_id is not None:
        # The dimension_id of the attribute is not None
        dimension_i = units.get_dimension(attribute_j.dimension_id)
    elif attribute_j.dimension is not None:
        # The dimension name of the attribute is not None
        dimension_name = attribute_j.dimension.strip()
        if dimension_name.lower() in ('dimensionless', ''):
            dimension_name = 'dimensionless'
        dimension_i = units.get_dimension_by_name(dimension_name.strip())
    elif attribute_j.unit_id is not None:
        # The unit_id of the attribute is not None
        dimension_i = units.get_dimension_by_unit_id(attribute_j.unit_id)
    elif attribute_j.unit not in ('', None):
        # The unit of the attribute is not None
        attribute_unit_id = units.get_unit_by_abbreviation(attribute_j.unit).id
        attribute_j.unit_id = attribute_unit_id
        dimension_i = units.get_dimension_by_unit_id(attribute_j.unit_id)

    attribute_name = attribute_j.name.strip()

    if dimension_i is None:
        # In this case we must get the attr with dimension id not set
        attr_i = get_attr_by_name_and_dimension(attribute_name, None)
    else:
        attr_i = get_attr_by_name_and_dimension(attribute_name, dimension_i.id)

    #Get an ID for the attribute
    db.DBSession.flush()

    for ta in type_i.typeattrs:
        if ta.attr_id == attr_i.id:
            typeattr_i = ta
            break
    else:
        typeattr_i = TypeAttr()
        log.debug("Creating type attr: type_id=%s, attr_id=%s", type_i.id,
                  attr_i.id)
        typeattr_i.type_id = type_i.id
        typeattr_i.attr_id = attr_i.id
        typeattr_i.attr_is_var = typeattr_j.attr_is_var
        typeattr_i.attr = attr_i
        typeattr_i.status = 'A'
        type_i.typeattrs.append(typeattr_i)
        db.DBSession.add(typeattr_i)

    unit_id = None
    if attribute_j.unit_id is not None:
        typeattr_i.unit_id = typeattr_j.unit_id

    check_dimension(typeattr_i)

    if typeattr_j.description is not None:
        typeattr_i.description = typeattr_j.description

    if typeattr_j.properties is not None:
        if isinstance(typeattr_j.properties, dict):
            typeattr_i.properties = json.dumps(typeattr_j.properties)
        else:
            typeattr_i.properties = typeattr_j.properties

    if typeattr_j.is_var is not None:
        typeattr_i.attr_is_var = typeattr_j.is_var

    if typeattr_j.data_type is not None:
        typeattr_i.data_type = typeattr_j.data_type

    if default_dataset_j is not None:
        default = default_dataset_j

        unit = default.unit
        unit_id = None
        if unit not in (None, ''):
            unit_id = units.get_unit_by_abbreviation(unit).id

        if unit_id is None and typeattr_i.unit_id is not None:
            unit_id = typeattr_i.unit_id

        if unit_id is not None:
            check_dimension(typeattr_i, unit_id)

        if unit_id is not None and typeattr_i.unit_id is not None:
            if unit_id != typeattr_i.unit_id:
                raise HydraError(
                    "Default value has a unit of %s but the attribute"
                    " says the unit should be: %s" %
                    (unit_id, typeattr_i.unit_id))

        val = default.value

        data_type = default.type
        name = default.name if default.name not in (
            None, '') else "%s Default" % attr_i.name

        dataset_i = add_dataset(data_type,
                                val,
                                unit_id,
                                name=name,
                                user_id=user_id)
        typeattr_i.default_dataset_id = dataset_i.id

    if typeattr_j.restriction is not None or typeattr_j.data_restriction is not None:
        restriction = typeattr_j.restriction if typeattr_j.restriction is not None else typeattr_j.data_restriction
        if isinstance(restriction, dict):
            typeattr_i.data_restriction = json.dumps(restriction)
        else:
            typeattr_i.data_restriction = restriction
    else:
        typeattr_i.data_restriction = None

    return typeattr_i
Example #20
def import_template_dict(template_dict, allow_update=True, **kwargs):

    user_id = kwargs.get('user_id')

    template_file_j = template_dict

    file_attributes = template_file_j.get('attributes')
    file_datasets = template_file_j.get('datasets', {})
    template_j = JSONObject(template_file_j.get('template', {}))

    #default datasets are optional, so don't force them to exist in the structure
    default_datasets_j = {}
    for k, v in file_datasets.items():
        default_datasets_j[int(k)] = Dataset(v)

    if file_attributes is None or len(template_j) == 0:
        raise HydraError("Invalid template. The template must have the following structure: " + \
                            "{'attributes':\\{...\\}, 'datasets':\\{...\\}, 'template':\\{...\\}}")

    #Normalise attribute IDs so they're always ints (in case they're specified as strings)
    attributes_j = {}
    for k, v in file_attributes.items():
        attributes_j[int(k)] = JSONObject(v)

    template_name = template_j.name
    template_description = template_j.description

    template_layout = None
    if template_j.layout is not None:
        if isinstance(template_j.layout, dict):
            template_layout = json.dumps(template_j.layout)
        else:
            template_layout = template_j.layout

    try:
        template_i = db.DBSession.query(Template).filter(
            Template.name == template_name).options(
                joinedload('templatetypes').joinedload('typeattrs').joinedload(
                    'attr')).one()
        if allow_update == False:
            raise HydraError("Existing Template Found with name %s" %
                             (template_name, ))
        else:
            template_i.layout = template_layout
            template_i.description = template_description
    except NoResultFound:
        log.debug("Template not found. Creating new one. name=%s",
                  template_name)
        template_i = Template(name=template_name,
                              description=template_description,
                              layout=template_layout)
        db.DBSession.add(template_i)

    types_j = template_j.templatetypes
    type_id_map = {r.id: r for r in template_i.templatetypes}
    #Delete any types which are in the DB but no longer in the JSON file
    type_name_map = {r.name: r.id for r in template_i.templatetypes}
    attr_name_map = {}
    for type_i in template_i.templatetypes:
        for typeattr in type_i.typeattrs:
            attr_name_map[typeattr.attr.name] = (typeattr.attr_id,
                                                 typeattr.type_id)

    existing_types = set([r.name for r in template_i.templatetypes])
    log.debug(
        ["%s : %s" % (tt.name, tt.id) for tt in template_i.templatetypes])
    log.debug("Existing types: %s", existing_types)

    new_types = set([t.name for t in types_j])
    log.debug("New Types: %s", new_types)

    types_to_delete = existing_types - new_types
    log.debug("Types to delete: %s", types_to_delete)
    log.debug(type_name_map)
    for type_to_delete in types_to_delete:
        type_id = type_name_map[type_to_delete]
        try:
            for i, tt in enumerate(template_i.templatetypes):
                if tt.id == type_id:
                    type_i = template_i.templatetypes[i]

                    #first remove all the type attributes associated to the type
                    for ta_i in type_i.typeattrs:
                        db.DBSession.delete(ta_i)

                    del (template_i.templatetypes[i])
                    log.debug("Deleting type %s (%s)", type_i.name, type_i.id)
                    del (type_name_map[type_to_delete])
                    db.DBSession.delete(type_i)
        except NoResultFound:
            pass

    #Add or update types.
    for type_j in types_j:
        type_name = type_j.name

        #check if the type is already in the DB. If not, create a new one.
        type_is_new = False
        if type_name in existing_types:
            type_id = type_name_map[type_name]
            type_i = type_id_map[type_id]
        else:
            log.debug("Type %s not found, creating new one.", type_name)
            type_i = TemplateType()
            type_i.name = type_name
            template_i.templatetypes.append(type_i)
            type_i.status = 'A'  ## defaults to active
            type_is_new = True

        if type_j.description is not None:
            type_i.description = type_j.description

        if type_j.alias is not None:
            type_i.alias = type_j.alias

        #Allow 'type' or 'resource_type' to be accepted
        if type_j.type is not None:
            type_i.resource_type = type_j.type
        elif type_j.resource_type is not None:
            type_i.resource_type = type_j.resource_type

        if type_j.resource_type is None:
            raise HydraError("No resource type specified."
                             " 'NODE', 'LINK', 'GROUP' or 'NETWORK'")

        if type_j.layout is not None:
            if isinstance(type_j.layout, dict):
                type_i.layout = json.dumps(type_j.layout)
            else:
                type_i.layout = type_j.layout

        #delete any TypeAttrs which are in the DB but not in the JSON file
        existing_attrs = []
        if not type_is_new:
            for r in template_i.templatetypes:
                if r.name == type_name:
                    for typeattr in r.typeattrs:
                        existing_attrs.append(typeattr.attr.name)

        existing_attrs = set(existing_attrs)

        type_attrs = []
        for typeattr_j in type_j.typeattrs:
            if typeattr_j.attr_id is not None:
                attr_j = attributes_j[typeattr_j.attr_id].name
            elif typeattr_j.attr is not None:
                attr_j = typeattr_j.attr.name
            type_attrs.append(attr_j)

        type_attrs = set(type_attrs)

        attrs_to_delete = existing_attrs - type_attrs
        for attr_to_delete in attrs_to_delete:
            attr_id, type_id = attr_name_map[attr_to_delete]
            try:
                attr_i = db.DBSession.query(TypeAttr).filter(
                    TypeAttr.attr_id == attr_id,
                    TypeAttr.type_id == type_id).options(
                        joinedload('attr')).one()
                db.DBSession.delete(attr_i)
                log.debug("Attr %s in type %s deleted", attr_i.attr.name,
                          attr_i.templatetype.name)
            except NoResultFound:
                log.debug("Attr %s not found in type %s" % (attr_id, type_id))
                continue

        #Add or update type typeattrs
        #Support an external attribute dict or embedded attributes.
        for typeattr_j in type_j.typeattrs:
            if typeattr_j.attr_id is not None:
                attr_j = attributes_j[typeattr_j.attr_id]
            elif typeattr_j.attr is not None:
                attr_j = typeattr_j.attr

            default_dataset_j = None
            if typeattr_j.default_dataset is not None:
                default_dataset_j = typeattr_j.default_dataset
            elif typeattr_j.default is not None:  # for backward compatibility
                default_dataset_j = typeattr_j.default
            elif typeattr_j.default_dataset_id is not None:
                default_dataset_j = default_datasets_j[int(
                    typeattr_j.default_dataset_id)]

            parse_json_typeattr(type_i,
                                typeattr_j,
                                attr_j,
                                default_dataset_j,
                                user_id=user_id)

    db.DBSession.flush()

    return template_i
Example #21
def import_template_xml(template_xml, allow_update=True, **kwargs):
    """
        Add the template, type and typeattrs described
        in an XML file.

        Delete type, typeattr entries in the DB that are not in the XML file
        The assumption is that they have been deleted and are no longer required.
    """
    user_id = kwargs.get('user_id')

    template_xsd_path = config.get('templates', 'template_xsd_path')
    xmlschema_doc = etree.parse(template_xsd_path)

    xmlschema = etree.XMLSchema(xmlschema_doc)

    xml_tree = etree.fromstring(template_xml)

    xmlschema.assertValid(xml_tree)

    template_name = xml_tree.find('template_name').text
    template_description = xml_tree.find('template_description')
    if template_description is not None:
        template_description = template_description.text

    template_layout = None
    if xml_tree.find('layout') is not None and \
               xml_tree.find('layout').text is not None:
        layout = xml_tree.find('layout')
        layout_string = get_etree_layout_as_dict(layout)
        template_layout = json.dumps(layout_string)

    try:
        tmpl_i = db.DBSession.query(Template).filter(Template.name == template_name)\
            .options(joinedload('templatetypes')
            .joinedload('typeattrs')
            .joinedload('attr')).one()

        if allow_update == False:
            raise HydraError("Existing Template Found with name %s" %
                             (template_name, ))
        else:
            log.debug("Existing template found. name=%s", template_name)
            tmpl_i.layout = template_layout
            tmpl_i.description = template_description
    except NoResultFound:
        log.debug("Template not found. Creating new one. name=%s",
                  template_name)
        tmpl_i = Template(name=template_name,
                          description=template_description,
                          layout=template_layout)
        db.DBSession.add(tmpl_i)

    types = xml_tree.find('resources')
    #Delete any types which are in the DB but no longer in the XML file
    type_name_map = {r.name: r.id for r in tmpl_i.templatetypes}
    attr_name_map = {}
    for type_i in tmpl_i.templatetypes:
        for typeattr in type_i.typeattrs:
            attr_name_map[typeattr.attr.name] = (typeattr.attr.id,
                                                 typeattr.type_id)

    existing_types = set([r.name for r in tmpl_i.templatetypes])

    new_types = set([r.find('name').text for r in types.findall('resource')])

    types_to_delete = existing_types - new_types

    for type_to_delete in types_to_delete:
        type_id = type_name_map[type_to_delete]
        try:
            type_i = db.DBSession.query(TemplateType).filter(
                TemplateType.id == type_id).one()
            log.debug("Deleting type %s", type_i.name)
            db.DBSession.delete(type_i)
        except NoResultFound:
            pass

    #Add or update types.
    for resource in types.findall('resource'):
        type_name = resource.find('name').text
        #check if the type is already in the DB. If not, create a new one.
        type_is_new = False
        if type_name in existing_types:
            type_id = type_name_map[type_name]
            type_i = db.DBSession.query(TemplateType).filter(
                TemplateType.id == type_id).options(
                    joinedload('typeattrs').joinedload('attr')).one()

        else:
            log.debug("Type %s not found, creating new one.", type_name)
            type_i = TemplateType()
            type_i.name = type_name
            tmpl_i.templatetypes.append(type_i)
            type_is_new = True

        if resource.find('alias') is not None:
            type_i.alias = resource.find('alias').text

        if resource.find('description') is not None:
            type_i.description = resource.find('description').text

        if resource.find('type') is not None:
            type_i.resource_type = resource.find('type').text

        if resource.find('layout') is not None and \
            resource.find('layout').text is not None:
            layout = resource.find('layout')
            layout_string = get_etree_layout_as_dict(layout)
            type_i.layout = json.dumps(layout_string)

        #delete any TypeAttrs which are in the DB but not in the XML file
        existing_attrs = []
        if not type_is_new:
            for r in tmpl_i.templatetypes:
                if r.name == type_name:
                    for typeattr in r.typeattrs:
                        existing_attrs.append(typeattr.attr.name)

        existing_attrs = set(existing_attrs)

        template_attrs = set(
            [r.find('name').text for r in resource.findall('attribute')])

        attrs_to_delete = existing_attrs - template_attrs
        for attr_to_delete in attrs_to_delete:
            attr_id, type_id = attr_name_map[attr_to_delete]
            try:
                attr_i = db.DBSession.query(TypeAttr).filter(
                    TypeAttr.attr_id == attr_id,
                    TypeAttr.type_id == type_id).options(
                        joinedload('attr')).one()
                db.DBSession.delete(attr_i)
                log.debug("Attr %s in type %s deleted", attr_i.attr.name,
                          attr_i.templatetype.name)
            except NoResultFound:
                log.debug("Attr %s not found in type %s", attr_id, type_id)
                continue

        #Add or update type typeattrs
        for attribute in resource.findall('attribute'):
            new_typeattr = _parse_xml_typeattr(type_i,
                                               attribute,
                                               user_id=user_id)

    db.DBSession.flush()

    return tmpl_i
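A minimal sketch of the XML layout this importer walks. The element names are taken from the parsing code above and from _parse_xml_typeattr; the root tag, the <dimension> child and all values are assumptions for illustration, and a real file must still validate against the configured XSD:

template_xml = """
<template_definition>
    <template_name>Example template</template_name>
    <template_description>Illustrative only</template_description>
    <resources>
        <resource>
            <type>NODE</type>
            <name>Reservoir</name>
            <attribute>
                <name>storage</name>
                <dimension>Volume</dimension>
                <is_var>N</is_var>
            </attribute>
        </resource>
    </resources>
</template_definition>
"""
# With a configured session:
# import_template_xml(template_xml, allow_update=True, user_id=1)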
Example #22
    def generate_node_schema_resource_scenarios(self, pywr_node):
        """ Generate resource attribute, resource scenario and datasets for a Pywr node.

        """
        node_name = pywr_node['name']
        node_type = pywr_node['type'].lower()
        node_klass = NodeMeta.node_registry.get(node_type)

        msg = f"Node type {node_klass} not recognised."

        if node_klass is None and self.ignore_type_errors is False:
            raise HydraError(msg)

        if node_klass is None:
            log.warning(msg + " Using 'descriptor' as default data type")
            fields = dict((n, 'descriptor') for n in pywr_node.keys())
        else:
            schema = node_klass.Schema()
            fields = schema.fields

        # Create an attribute for each field in the schema.
        for name, field in fields.items():
            if name not in pywr_node:
                continue  # Skip missing fields

            if name in PYWR_PROTECTED_NODE_KEYS:
                continue
            # Non-protected keys represent data that must be added to Hydra.
            if isinstance(field, str):
                data_type = field  #the default field if it can't find the class
            else:
                data_type = data_type_from_field(field)

            if data_type == PywrParameter.tag.lower():
                # If the field is defined as general parameter then the actual
                # type might be something more specific.
                try:
                    data_type = data_type_from_parameter_value(
                        pywr_node[name]).tag
                except ValueError:
                    log.warning(f'No Hydra data type for Pywr field "{name}"'
                                f' on node type "{node_type}" found.')

                #TODO: hack to ignore these when they reference parameters elsewhere
                if data_type.lower() == 'descriptor' and pywr_node[name].find(
                        f"__{node_name}__") >= 0:

                    log.warning(
                        "Ignoring descriptor %s on attribute %s, node %s, as it is assumed "
                        "to be defined as a parameter and so will be set as an attribute "
                        "through the parameters.",
                        pywr_node[name], name, node_name)
                    continue

            # Key is the attribute name. The attributes need to have already been added to the
            # database and hence have a valid id.
            attribute_id = self.attribute_name_id_map[name]

            unit_id = self.attr_unit_map.get(attribute_id)

            yield self._make_dataset_resource_attribute_and_scenario(
                name,
                pywr_node[name],
                data_type,
                attribute_id,
                unit_id=unit_id,
                encode_to_json=True)
Example #23
def validate_network(network_id, template_id, scenario_id=None, **kwargs):
    """
        Given a network, scenario and template, ensure that all the nodes, links & groups
        in the network have the correct resource attributes as defined by
        the types in the template.
        Also ensure valid entries in tresourcetype.
        This validation will not fail if a resource has more types than required,
        but will fail if it has fewer, or if any attribute has a
        conflicting dimension or unit.
    """

    network = db.DBSession.query(Network).filter(
        Network.id == network_id).options(noload('scenarios')).first()

    if network is None:
        raise HydraError("Could not find network %s"%(network_id))

    resource_scenario_dict = {}
    if scenario_id is not None:
        scenario = db.DBSession.query(Scenario).filter(Scenario.id == scenario_id).first()

        if scenario is None:
            raise HydraError("Could not find scenario %s"%(scenario_id,))

        for rs in scenario.resourcescenarios:
            resource_scenario_dict[rs.resource_attr_id] = rs

    template = db.DBSession.query(Template).filter(
        Template.id == template_id).first()

    if template is None:
        raise HydraError("Could not find template %s"%(template_id,))

    resource_type_defs = {
        'NETWORK' : {},
        'NODE'    : {},
        'LINK'    : {},
        'GROUP'   : {},
    }
    for tt in template.get_types():
        resource_type_defs[tt.resource_type][tt.id] = tt

    errors = []
    #Only check if there are type definitions for a network in the template.
    if resource_type_defs.get('NETWORK'):
        net_types = resource_type_defs['NETWORK']
        errors.extend(validate_resource(network, net_types, resource_scenario_dict))

    #check all nodes
    if resource_type_defs.get('NODE'):
        node_types = resource_type_defs['NODE']
        for node in network.nodes:
            errors.extend(validate_resource(node, node_types, resource_scenario_dict))

    #check all links
    if resource_type_defs.get('LINK'):
        link_types = resource_type_defs['LINK']
        for link in network.links:
            errors.extend(validate_resource(link, link_types, resource_scenario_dict))

    #check all groups
    if resource_type_defs.get('GROUP'):
        group_types = resource_type_defs['GROUP']
        for group in network.resourcegroups:
            errors.extend(validate_resource(group, group_types, resource_scenario_dict))

    return errors
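A minimal usage sketch, assuming a configured hydra-base session and existing records (the IDs are hypothetical); the function returns a list of validation errors, empty when the network conforms to the template:

errors = validate_network(network_id=1, template_id=2, scenario_id=3)
if errors:
    print("Validation failed:")
    for err in errors:
        print(err)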
Example #24
def _parse_xml_typeattr(type_i, attribute, user_id=None):
    """
        convert a typeattr etree element and turn it into a hydra type attr
    """

    attr = _parse_xml_attribute(attribute)

    for ta in type_i.typeattrs:
        if ta.attr_id == attr.id:
            # Find the TypeAttr
            typeattr_i = ta
            break
    else:
        # Creating a new TypeAttr
        typeattr_i = TypeAttr()
        log.debug("Creating type attr: type_id=%s, attr_id=%s", type_i.id,
                  attr.id)
        typeattr_i.type_id = type_i.id
        typeattr_i.attr_id = attr.id
        type_i.typeattrs.append(typeattr_i)
        db.DBSession.add(typeattr_i)

    typeattr_unit_id = None
    if attribute.find('unit') is not None:
        # Found the unit as child at first level
        unit = attribute.find('unit').text
        if unit not in ('', None):
            typeattr_unit_id = units.get_unit_by_abbreviation(unit).id

    if typeattr_unit_id is not None:
        typeattr_i.unit_id = typeattr_unit_id

    check_dimension(typeattr_i)

    if attribute.find('description') is not None:
        typeattr_i.description = attribute.find('description').text

    if attribute.find('properties') is not None:
        properties_string = get_etree_layout_as_dict(
            attribute.find('properties'))
        typeattr_i.properties = str(properties_string)

    if attribute.find('is_var') is not None:
        typeattr_i.attr_is_var = attribute.find('is_var').text
    if attribute.find('data_type') is not None:
        typeattr_i.data_type = attribute.find('data_type').text

    # Analyzing the "default" node
    if attribute.find('default') is not None:
        default = attribute.find('default')

        dataset_unit_id = None
        if default.find('unit') is not None:
            dataset_unit = default.find('unit').text
            if dataset_unit not in ('', None):
                dataset_unit_id = units.get_unit_by_abbreviation(
                    dataset_unit).id

        if dataset_unit_id is None and typeattr_i.unit_id is not None:
            dataset_unit_id = typeattr_i.unit_id

        if dataset_unit_id is not None and typeattr_i.unit_id is not None:
            if dataset_unit_id != typeattr_i.unit_id:
                raise HydraError(
                    f"Default value has a unit of {dataset_unit_id}"
                    " but the attribute"
                    f" says the unit should be: {typeattr_i.unit_id}")

        val = default.find('value').text
        try:
            Decimal(val)
            data_type = 'scalar'
        except:
            data_type = 'descriptor'

        dataset = add_dataset(data_type,
                              val,
                              dataset_unit_id,
                              name="%s Default" % attr.name,
                              user_id=user_id)

        typeattr_i.default_dataset_id = dataset.id

    if attribute.find('restrictions') is not None:
        restriction = str(
            dataset_util.get_restriction_as_dict(
                attribute.find('restrictions')))
        typeattr_i.data_restriction = restriction
    else:
        typeattr_i.data_restriction = None

    return typeattr_i