Example #1
    def create_dataframe(self, resource_attr, name='Test Data Frame', dataframe_value=None, unit='m^3 s^-1'):
        #A scenario attribute is a piece of data associated
        #with a resource attribute.

        if dataframe_value is None:
            val_1 = 1
            val_2 = 2
            val_3 = 3

            dataframe_value = {"test_column": {'key1': val_1,
                                               'key2': val_2,
                                               'key3': val_3}}

        metadata = {'created_by': 'Test user'}

        dataset = Dataset(dict(
            id=None,
            type = 'dataframe',
            name = name,
            unit_id = self.get_unit(unit).id,
            hidden = 'N',
            value = json.dumps(dataframe_value),
            metadata = metadata
        ))

        scenario_attr = JSONObject(dict(
            attr_id = resource_attr.attr_id,
            resource_attr_id = resource_attr.id,
            dataset = dataset,
        ))

        return scenario_attr
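
The stored value is plain column-oriented JSON, so it can be decoded outside Hydra with pandas alone. A minimal round-trip sketch (an illustration, not part of the Hydra API):

import io
import json

import pandas

# Same structure as the default dataframe_value above.
value = json.dumps({"test_column": {"key1": 1, "key2": 2, "key3": 3}})

# Decodes to a one-column dataframe indexed by key1..key3.
df = pandas.read_json(io.StringIO(value))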
Example #2
    def create_array(self, resource_attr):
        #A scenario attribute is a piece of data associated
        #with a resource attribute.
        #[[1, 2, 3], [4, 5, 6], [7, 8, 9]]

        arr = json.dumps([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])

        metadata_array = json.dumps({'created_by': 'Test user'})

        dataset = Dataset(dict(
            id=None,
            type = 'array',
            name = 'my array',
            unit_id = self.get_unit('bar').id,
            hidden = 'N',
            value = arr,
            metadata = metadata_array,
        ))

        scenario_attr = JSONObject(dict(
            attr_id = resource_attr.attr_id,
            resource_attr_id = resource_attr.id,
            dataset = dataset,
        ))

        return scenario_attr
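
The array value is likewise stored as a JSON string, so it decodes back to a nested list. Converting it to numpy below is an assumption for illustration only; Hydra itself just stores the string:

import json

import numpy

value = json.dumps([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])

# json round-trips to a nested list, which numpy turns into a 3x3 matrix.
arr = numpy.array(json.loads(value))
assert arr.shape == (3, 3)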
Example #3
def combine_dataframes(dataframes):
    """
        Take a list of pandas dataframes with the same index and combine
        them into a single multi-column dataframe.
    """
    #merge the dataframes, assuming they share the same index (axis=1 concatenates column-wise)
    concat_df = pandas.concat(dataframes, axis=1)

    dataset = Dataset({
        'name': 'Combined Dataframe',
        'type': 'dataframe',
        'value': concat_df.to_json()
    })

    return dataset
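
pandas.concat with axis=1 aligns the frames on their shared index and joins them column-wise. A standalone sketch with made-up column names:

import pandas

df_a = pandas.DataFrame({'flow': [1.0, 2.0]}, index=['2020-01', '2020-02'])
df_b = pandas.DataFrame({'level': [5.0, 6.0]}, index=['2020-01', '2020-02'])

# One frame with both 'flow' and 'level' columns, aligned on the index.
combined = pandas.concat([df_a, df_b], axis=1)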
Example #4
    def create_timeseries(self, resource_attr, unit='m^3'):
        #A scenario attribute is a piece of data associated
        #with a resource attribute.
        #[[[1, 2, "hello"], [5, 4, 6]], [[10, 20, 30], [40, 50, 60]]]

        fmt = hydra_base.config.get('DEFAULT', 'datetime_format',
                                    "%Y-%m-%dT%H:%M:%S.%f000Z")

        t1 = datetime.datetime.now(datetime.timezone.utc)
        t2 = t1 + datetime.timedelta(hours=1)
        t3 = t1 + datetime.timedelta(hours=2)

        val_1 = [[[1, 2, "hello"], [5, 4, 6]], [[10, 20, 30], [40, 50, 60]],
                 [[9, 8, 7], [6, 5, 4]]]
        val_2 = [1.0, 2.0, 3.0]

        val_3 = [3.0, None, None]

        ts_val = {
            "test_column": {
                t1.strftime(fmt): val_1,
                t2.strftime(fmt): val_2,
                t3.strftime(fmt): val_3
            }
        }

        metadata = {'created_by': 'Test user'}

        dataset = Dataset(dict(
            id=None,
            type='timeseries',
            name='my time series',
            unit_id=self.get_unit(unit).id,  # This does not match the type on purpose, to test validation
            hidden='N',
            value=json.dumps(ts_val),
            metadata=metadata
        ))

        scenario_attr = JSONObject(dict(
            attr_id=resource_attr.attr_id,
            resource_attr_id=resource_attr.id,
            dataset=dataset,
        ))

        return scenario_attr
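
The keys of the timeseries value are datetimes rendered with the configured format string; the literal "000Z" suffix pads the microseconds from %f up to a nanosecond-style timestamp. A quick illustration of the key format:

import datetime

fmt = "%Y-%m-%dT%H:%M:%S.%f000Z"
t1 = datetime.datetime(2024, 1, 1, tzinfo=datetime.timezone.utc)

# '2024-01-01T00:00:00.000000000Z'
key = t1.strftime(fmt)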
Example #5
    def create_scalar(self, resource_attr, val=1.234, unit='m^3'):
        #A scenario attribute is a piece of data associated
        #with a resource attribute.

        dataset = Dataset(dict(
            id=None,
            type = 'scalar',
            name = 'Flow speed',
            unit_id =  self.get_unit(unit).id,
            hidden = 'N',
            value = val,
        ))

        scenario_attr = JSONObject(dict(
            attr_id=resource_attr.attr_id,
            resource_attr_id=resource_attr.id,
            dataset=dataset,
        ))

        return scenario_attr
Example #6
    def create_descriptor(self, resource_attr, val="test"):
        #A scenario attribute is a piece of data associated
        #with a resource attribute.

        dataset = Dataset(dict(
            id=None,
            type = 'descriptor',
            name = 'Flow speed',
            unit_id = self.get_unit('m s^-1').id, # This does not match the type on purpose, to test validation
            hidden = 'N',
            value = val,
        ))

        scenario_attr = JSONObject(dict(
            attr_id = resource_attr.attr_id,
            resource_attr_id = resource_attr.id,
            dataset = dataset,
        ))

        return scenario_attr
Example #7
def import_dataframe(client,
                     dataframe,
                     network_id,
                     scenario_id,
                     attribute_id,
                     column=None,
                     create_new=False,
                     data_type='PYWR_DATAFRAME',
                     overwrite=False):
    """
    args:
        client: (JSONConnection): The hydra client object
        dataframe (pandas dataframe): pandas dataframe read from excel
        network_id (int): the network ID
        scenario_id (int): THe scenario ID
        attribute_id (int): The attribute ID to update.
        column (string): The name of the specific colum to use. If None, uses all of them.
        create_new (bool): default False : If an node attribute doesn't exist, create it.
        data_type (ENUM (PYWR_DATAFRAME, DATAFRAME)): The data type the new dataset should be.
        overwrite (bool): If true, it overwrites an existing valuye with the new one. If false
                          it will try to update the existing value. The data type of the existing
                          value must match that of the updating value
    """
    # Find all the nodes in the network

    node_data = {}
    for node_name in dataframe:
        # An exception is raised by hydra if the node name does not exist.
        node = client.get_node_by_name(network_id, node_name)

        # Fetch the node's data
        resource_scenarios = client.get_resource_data('NODE', node['id'],
                                                      scenario_id)
        for resource_scenario in resource_scenarios:
            resource_attribute_id = resource_scenario['resource_attr_id']
            resource_attribute = client.get_resource_attribute(
                resource_attribute_id)

            if resource_attribute['attr_id'] != attribute_id:
                continue  # Skip the wrong attribute data

            dataset = resource_scenario['dataset']

            if dataset['type'].lower() != data_type.lower() and not overwrite:
                raise ValueError(
                    f'Node "{node_name}" dataset for attribute_id'
                    f' {attribute_id} must be'
                    f' type "{dataset["type"]}", not type "{data_type.upper()}".'
                )

            dataset['value'] = make_dataframe_dataset_value(
                dataset['value'],
                dataframe[node_name],
                data_type,
                column,
                node_name,
                overwrite=overwrite)
            #update the data type if necessary
            if overwrite:
                dataset['type'] = data_type

            node_data[node_name] = {
                'node_id': node['id'],
                'resource_attribute_id': resource_attribute['id'],
                'dataset': dataset,
            }

        if node_name not in node_data:
            if not create_new:
                # No resource attribute found!
                raise ValueError(
                    f'Node "{node_name}" does not contain a resource attribute '
                    f'for the attribute "{attribute_id}".')
            else:
                resource_attribute = client.add_resource_attribute(
                    'NODE',
                    node['id'],
                    attribute_id,
                    'N',
                    error_on_duplicate=False)

                df = dataframe[node_name].to_frame()
                df.columns = [column]

                if data_type.lower() == 'dataframe':
                    # Embed data as strings of datetimes rather than timestamps.
                    df.index = df.index.astype(str)
                    value = df.to_json(orient='columns')
                else:
                    default_value = json.dumps({
                        "type": "dataframeparameter",
                        "pandas_kwargs": {
                            "parse_dates": True
                        }
                    })

                    value = make_dataframe_dataset_value(default_value,
                                                         df,
                                                         data_type,
                                                         column,
                                                         node_name,
                                                         overwrite=overwrite)

                dataset = Dataset({
                    'name': "data",
                    'value': value,
                    "hidden": "N",
                    "type": data_type.upper(),
                    "unit": "-",
                })

                node_data[node_name] = {
                    'node_id': node['id'],
                    'resource_attribute_id': resource_attribute['id'],
                    'dataset': dataset,
                }

    # Now update the database with the new data
    for node_name, data in node_data.items():
        client.add_data_to_attribute(scenario_id,
                                     data['resource_attribute_id'],
                                     data['dataset'])
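
The value built for the plain 'dataframe' branch is column-oriented JSON keyed by stringified datetimes. A minimal sketch of that serialisation step, with a hypothetical column name standing in for the `column` argument:

import pandas

# Hypothetical single-node series, as sliced from the imported Excel dataframe.
series = pandas.Series(
    [1.0, 2.0],
    index=pandas.to_datetime(['2024-01-01', '2024-01-02']))

df = series.to_frame()
df.columns = ['demand']           # hypothetical column name
df.index = df.index.astype(str)   # datetimes serialised as strings

# e.g. '{"demand":{"2024-01-01":1.0,"2024-01-02":2.0}}'
value = df.to_json(orient='columns')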
Example #8
def pywr_with_demand_pattern(model_directory, db_with_template, projectmaker,
                             logged_in_client):
    client = logged_in_client

    # Create the basic pywr model
    project = projectmaker.create()
    pywr_json_filename = os.path.join(model_directory, 'simple1.json')
    template = client.get_template_by_name(pywr_template_name('Full'))

    importer = PywrHydraImporter.from_client(client, pywr_json_filename,
                                             template['id'])
    network_id, scenario_id = importer.import_data(client, project.id)

    # Create the demand pattern
    pattern_attr = client.add_attribute({'name': 'demand_pattern'})
    ra = client.add_resource_attribute('NETWORK', network_id,
                                       pattern_attr['id'], 'N')

    with open(os.path.join(model_directory,
                           'simple_demand_pattern.json')) as fh:
        pattern_str = fh.read()

    pattern_data = Dataset({
        'name': 'demand_pattern',
        'value': pattern_str,
        "hidden": "N",
        "type": 'PYWR_PARAMETER_PATTERN',
    })

    client.add_data_to_attribute(scenario_id, ra['id'], pattern_data)

    # Assign the pattern to one of the nodes
    node = client.get_node_by_name(network_id, 'demand1')
    pattern_ref_attr = client.add_attribute({'name': 'demand'})
    ra = client.add_resource_attribute('NODE', node['id'],
                                       pattern_ref_attr['id'], 'N')

    pattern_ref_data = Dataset({
        'name': 'demand',
        'value': 'demand_pattern',
        'hidden': 'N',
        'type': 'PYWR_PARAMETER_PATTERN_REF'
    })

    client.add_data_to_attribute(scenario_id, ra['id'], pattern_ref_data)

    # Add a scalar population attribute to the same node
    population_attr = client.add_attribute({'name': 'population'})
    ra = client.add_resource_attribute('NODE', node['id'],
                                       population_attr['id'], 'N')

    population_data = Dataset({
        'name': 'population',
        'value': 3.14,
        'hidden': 'N',
        'type': 'SCALAR'
    })

    client.add_data_to_attribute(scenario_id, ra['id'], population_data)

    return network_id, scenario_id
Example #9
def import_template_dict(template_dict, allow_update=True, **kwargs):

    user_id = kwargs.get('user_id')

    template_file_j = template_dict

    file_attributes = template_file_j.get('attributes')
    file_datasets = template_file_j.get('datasets', {})
    template_j = JSONObject(template_file_j.get('template', {}))

    #default datasets are optional, so don't force them to exist in the structure
    default_datasets_j = {}
    for k, v in file_datasets.items():
        default_datasets_j[int(k)] = Dataset(v)

    if file_attributes is None or len(template_j) == 0:
        raise HydraError("Invalid template. The template must have the following structure: " + \
                            "{'attributes':\\{...\\}, 'datasets':\\{...\\}, 'template':\\{...\\}}")

    #Normalise attribute IDs so they're always ints (in case they're specified as strings)
    attributes_j = {}
    for k, v in file_attributes.items():
        attributes_j[int(k)] = JSONObject(v)

    template_name = template_j.name
    template_description = template_j.description

    template_layout = None
    if template_j.layout is not None:
        if isinstance(template_j.layout, dict):
            template_layout = json.dumps(template_j.layout)
        else:
            template_layout = template_j.layout

    try:
        template_i = db.DBSession.query(Template).filter(
            Template.name == template_name).options(
                joinedload('templatetypes').joinedload('typeattrs').joinedload(
                    'attr')).one()
        if not allow_update:
            raise HydraError("Existing Template Found with name %s" %
                             (template_name, ))
        else:
            template_i.layout = template_layout
            template_i.description = template_description
    except NoResultFound:
        log.debug("Template not found. Creating new one. name=%s",
                  template_name)
        template_i = Template(name=template_name,
                              description=template_description,
                              layout=template_layout)
        db.DBSession.add(template_i)

    types_j = template_j.templatetypes
    type_id_map = {r.id: r for r in template_i.templatetypes}
    #Delete any types which are in the DB but no longer in the JSON file
    type_name_map = {r.name: r.id for r in template_i.templatetypes}
    attr_name_map = {}
    for type_i in template_i.templatetypes:
        for typeattr in type_i.typeattrs:
            attr_name_map[typeattr.attr.name] = (typeattr.attr_id,
                                                 typeattr.type_id)

    existing_types = set([r.name for r in template_i.templatetypes])
    log.debug(
        ["%s : %s" % (tt.name, tt.id) for tt in template_i.templatetypes])
    log.debug("Existing types: %s", existing_types)

    new_types = set([t.name for t in types_j])
    log.debug("New Types: %s", new_types)

    types_to_delete = existing_types - new_types
    log.debug("Types to delete: %s", types_to_delete)
    log.debug(type_name_map)
    for type_to_delete in types_to_delete:
        type_id = type_name_map[type_to_delete]
        try:
            for i, tt in enumerate(template_i.templatetypes):
                if tt.id == type_id:
                    type_i = template_i.templatetypes[i]

                    #first remove all the type attributes associated to the type
                    for ta_i in type_i.typeattrs:
                        db.DBSession.delete(ta_i)

                    del template_i.templatetypes[i]
                    log.debug("Deleting type %s (%s)", type_i.name, type_i.id)
                    del type_name_map[type_to_delete]
                    db.DBSession.delete(type_i)
        except NoResultFound:
            pass

    #Add or update types.
    for type_j in types_j:
        type_name = type_j.name

        #check if the type is already in the DB. If not, create a new one.
        type_is_new = False
        if type_name in existing_types:
            type_id = type_name_map[type_name]
            type_i = type_id_map[type_id]
        else:
            log.debug("Type %s not found, creating new one.", type_name)
            type_i = TemplateType()
            type_i.name = type_name
            template_i.templatetypes.append(type_i)
            type_i.status = 'A'  ## defaults to active
            type_is_new = True

        if type_j.description is not None:
            type_i.description = type_j.description

        if type_j.alias is not None:
            type_i.alias = type_j.alias

        #Allow 'type' or 'resource_type' to be accepted
        if type_j.type is not None:
            type_i.resource_type = type_j.type
        elif type_j.resource_type is not None:
            type_i.resource_type = type_j.resource_type

        if type_i.resource_type is None:
            raise HydraError("No resource type specified."
                             " 'NODE', 'LINK', 'GROUP' or 'NETWORK'")

        if type_j.layout is not None:
            if isinstance(type_j.layout, dict):
                type_i.layout = json.dumps(type_j.layout)
            else:
                type_i.layout = type_j.layout

        #delete any TypeAttrs which are in the DB but not in the JSON file
        existing_attrs = []
        if not type_is_new:
            for r in template_i.templatetypes:
                if r.name == type_name:
                    for typeattr in r.typeattrs:
                        existing_attrs.append(typeattr.attr.name)

        existing_attrs = set(existing_attrs)

        type_attrs = []
        for typeattr_j in type_j.typeattrs:
            if typeattr_j.attr_id is not None:
                attr_j = attributes_j[typeattr_j.attr_id].name
            elif typeattr_j.attr is not None:
                attr_j = typeattr_j.attr.name
            type_attrs.append(attr_j)

        type_attrs = set(type_attrs)

        attrs_to_delete = existing_attrs - type_attrs
        for attr_to_delete in attrs_to_delete:
            attr_id, type_id = attr_name_map[attr_to_delete]
            try:
                attr_i = db.DBSession.query(TypeAttr).filter(
                    TypeAttr.attr_id == attr_id,
                    TypeAttr.type_id == type_id).options(
                        joinedload('attr')).one()
                db.DBSession.delete(attr_i)
                log.debug("Attr %s in type %s deleted", attr_i.attr.name,
                          attr_i.templatetype.name)
            except NoResultFound:
                log.debug("Attr %s not found in type %s" % (attr_id, type_id))
                continue

        #Add or update type typeattrs
        #Support an external attribute dict or embedded attributes.
        for typeattr_j in type_j.typeattrs:
            if typeattr_j.attr_id is not None:
                attr_j = attributes_j[typeattr_j.attr_id]
            elif typeattr_j.attr is not None:
                attr_j = typeattr_j.attr

            default_dataset_j = None
            if typeattr_j.default_dataset is not None:
                default_dataset_j = typeattr_j.default_dataset
            elif typeattr_j.default is not None:  # for backward compatibility
                default_dataset_j = typeattr_j.default
            elif typeattr_j.default_dataset_id is not None:
                default_dataset_j = default_datasets_j[int(typeattr_j.default_dataset_id)]

            parse_json_typeattr(type_i,
                                typeattr_j,
                                attr_j,
                                default_dataset_j,
                                user_id=user_id)

    db.DBSession.flush()

    return template_i
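
The add/update/delete reconciliation above boils down to set arithmetic on type names. A standalone sketch with made-up names:

existing_types = {'reservoir', 'junction', 'pipe'}   # types already in the DB
new_types = {'reservoir', 'pipe', 'pump'}            # types in the template file

types_to_delete = existing_types - new_types   # {'junction'}
types_to_create = new_types - existing_types   # {'pump'}
types_to_update = existing_types & new_types   # {'reservoir', 'pipe'}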