Example #1
def _get_existing_data(hashes):

    str_hashes = [str(h) for h in hashes]

    hash_dict = {}

    datasets = []
    if len(str_hashes) > qry_in_threshold:
        idx = 0
        extent = qry_in_threshold
        while idx < len(str_hashes):
            log.info("Querying %s datasets", len(str_hashes[idx:extent]))
            rs = DBSession.query(Dataset).filter(
                Dataset.data_hash.in_(str_hashes[idx:extent])).all()
            datasets.extend(rs)
            idx = idx + qry_in_threshold

            if idx + qry_in_threshold > len(str_hashes):
                extent = len(str_hashes)
            else:
                extent = extent + qry_in_threshold
    else:
        datasets = DBSession.query(Dataset).filter(
            Dataset.data_hash.in_(str_hashes)).all()

    for r in datasets:
        hash_dict[r.data_hash] = r

    log.info("Retrieved %s datasets", len(hash_dict))

    return hash_dict
Example #2
def _get_datasets(dataset_ids):
    """
        Get all the datasets in a list of dataset IDs. This must be done in chunks of 999,
        as sqlite can only handle 'in' with < 1000 elements.
    """

    dataset_dict = {}

    datasets = []
    if len(dataset_ids) > qry_in_threshold:
        idx = 0
        extent = qry_in_threshold
        while idx < len(dataset_ids):
            log.info("Querying %s datasets", len(dataset_ids[idx:extent]))
            rs = DBSession.query(Dataset).filter(
                Dataset.dataset_id.in_(dataset_ids[idx:extent])).all()
            datasets.extend(rs)
            idx = idx + qry_in_threshold

            if idx + qry_in_threshold > len(dataset_ids):
                extent = len(dataset_ids)
            else:
                extent = extent + qry_in_threshold
    else:
        datasets = DBSession.query(Dataset).filter(
            Dataset.dataset_id.in_(dataset_ids)).all()

    for r in datasets:
        dataset_dict[r.dataset_id] = r

    log.info("Retrieved %s datasets", len(dataset_dict))

    return dataset_dict
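Examples #1, #2 and #4 all repeat the same chunking loop. A minimal sketch that factors it out (chunk_query is a hypothetical helper, not part of the original module; it assumes the same DBSession and the 999-element limit sqlite imposes on 'in'):

def chunk_query(base_qry, column, values, chunk_size=999):
    #Run base_qry once per chunk, filtering `column` with IN on that chunk.
    results = []
    for start in range(0, len(values), chunk_size):
        chunk = values[start:start + chunk_size]
        results.extend(base_qry.filter(column.in_(chunk)).all())
    return results

Used in place of the loop above, _get_datasets reduces to chunk_query(DBSession.query(Dataset), Dataset.dataset_id, dataset_ids).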
Example #3
def get_resource_attributes(ref_key, ref_id, type_id=None, **kwargs):
    """
        Get all the resource attributes for a given resource.
        If type_id is specified, only return the resource attributes
        within the type.
    """

    user_id = kwargs.get('user_id')

    resource_attr_qry = DBSession.query(ResourceAttr).filter(
        ResourceAttr.ref_key == ref_key,
        or_(ResourceAttr.network_id == ref_id, ResourceAttr.node_id == ref_id,
            ResourceAttr.link_id == ref_id, ResourceAttr.group_id == ref_id))

    if type_id is not None:
        attr_ids = []
        rs = DBSession.query(TypeAttr).filter(
            TypeAttr.type_id == type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_attr_qry = resource_attr_qry.filter(
            ResourceAttr.attr_id.in_(attr_ids))

    resource_attrs = resource_attr_qry.all()

    return resource_attrs
Example #4
def _get_metadata(dataset_ids):
    """
        Get all the metadata for a given list of datasets
    """
    metadata = []
    if len(dataset_ids) == 0:
        return []
    if len(dataset_ids) > qry_in_threshold:
        idx = 0
        extent = qry_in_threshold
        while idx < len(dataset_ids):
            log.info("Querying %s metadatas", len(dataset_ids[idx:extent]))
            rs = DBSession.query(Metadata).filter(
                Metadata.dataset_id.in_(dataset_ids[idx:extent])).all()
            metadata.extend(rs)
            idx = idx + qry_in_threshold

            if idx + qry_in_threshold > len(dataset_ids):
                extent = len(dataset_ids)
            else:
                extent = extent + qry_in_threshold
    else:
        metadata_qry = DBSession.query(Metadata).filter(
            Metadata.dataset_id.in_(dataset_ids))
        for m in metadata_qry:
            metadata.append(m)

    return metadata
Example #5
def get_attribute_data(attr_ids, node_ids, **kwargs):
    """
        For a given attribute or set of attributes, return all the resources and
        resource scenarios in the network.
    """
    node_attrs = DBSession.query(ResourceAttr).\
                                            options(joinedload_all('attr')).\
                                            filter(ResourceAttr.node_id.in_(node_ids),
                                            ResourceAttr.attr_id.in_(attr_ids)).all()

    ra_ids = []
    for ra in node_attrs:
        ra_ids.append(ra.resource_attr_id)

    resource_scenarios = DBSession.query(ResourceScenario).filter(
        ResourceScenario.resource_attr_id.in_(ra_ids)).options(
            joinedload('resourceattr')).options(
                joinedload_all('dataset.metadata')).order_by(
                    ResourceScenario.scenario_id).all()

    for rs in resource_scenarios:
        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(kwargs.get('user_id'))
            except:
                rs.dataset.value = None
                rs.dataset.frequency = None
                rs.dataset.start_time = None
        DBSession.expunge(rs)

    return node_attrs, resource_scenarios
Example #6
def set_rs_dataset(resource_attr_id, scenario_id, dataset_id, **kwargs):
    rs = DBSession.query(ResourceScenario).filter(
        ResourceScenario.resource_attr_id == resource_attr_id,
        ResourceScenario.scenario_id == scenario_id).first()

    if rs is None:
        raise ResourceNotFoundError(
            "Resource scenario for resource attr %s not found in scenario %s" %
            (resource_attr_id, scenario_id))

    dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).first()

    if dataset is None:
        raise ResourceNotFoundError("Dataset %s not found" % (dataset_id, ))

    rs.dataset_id = dataset_id

    DBSession.flush()

    rs = DBSession.query(ResourceScenario).filter(
        ResourceScenario.resource_attr_id == resource_attr_id,
        ResourceScenario.scenario_id == scenario_id).first()

    return rs
Example #8
def _get_group(group_id):
    try:
        group_i = DBSession.query(ResourceGroup).filter(
            ResourceGroup.group_id == group_id).one()
        return group_i
    except NoResultFound:
        raise ResourceNotFoundError("ResourceGroup %s not found" %
                                    (group_id, ))
Example #13
def make_root_user():

    try:
        user = DBSession.query(User).filter(User.username == 'root').one()
    except NoResultFound:
        user = User(username='root',
                    password=bcrypt.hashpw('', bcrypt.gensalt()),
                    display_name='Root User')
        DBSession.add(user)

    try:
        role = DBSession.query(Role).filter(Role.role_code == 'admin').one()
    except NoResultFound:
        raise HydraError("Admin role not found.")

    try:
        userrole = DBSession.query(RoleUser).filter(
            RoleUser.role_id == role.role_id,
            RoleUser.user_id == user.user_id).one()
    except NoResultFound:
        userrole = RoleUser(role_id=role.role_id, user_id=user.user_id)
        user.roleusers.append(userrole)
        DBSession.add(userrole)
    DBSession.flush()
    transaction.commit()
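A sketch of how a login check against the hash stored by make_root_user might look (this helper is an assumption, not part of the original module; it uses the classic bcrypt idiom where re-hashing the candidate password with the stored hash as the salt must reproduce the hash):

def check_password(username, password):
    user = DBSession.query(User).filter(User.username == username).one()
    #bcrypt embeds the salt in the stored hash, so hashing with it
    #reproduces the stored value only for the correct password.
    return bcrypt.hashpw(password, user.password) == user.password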
Example #14
def get_resourcescenarios(resource_attr_ids, scenario_ids, **kwargs):
    """
        Retrieve all the datasets in a scenario for a given attribute.
        Also return the resource attributes so there is a reference to the node/link
    """

    #Make sure the resource_attr_ids are valid
    check_ra_qry = DBSession.query(ResourceAttr).filter(
        ResourceAttr.resource_attr_id.in_(resource_attr_ids)).all()
    if len(check_ra_qry) != len(resource_attr_ids):
        raise HydraError(
            "Unrecognised resource attributes %s were found in list" %
            (resource_attr_ids, ))

    #Make sure the scenario ids are valid
    scen_qry = DBSession.query(Scenario).filter(
        Scenario.scenario_id.in_(scenario_ids)).all()
    if len(scen_qry) != len(scenario_ids):
        raise HydraError(
            "Unrecognised scenarios %s were found in list" %
            (scenario_ids, ))

    rs_result = DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id.in_(scenario_ids),
        ResourceScenario.resource_attr_id.in_(resource_attr_ids)).all()

    return rs_result
Example #15
def update_dataset(dataset_id, name, data_type, val, units, dimension, metadata={}, **kwargs):
    """
        Update an existing dataset
    """

    if dataset_id is None:
        raise HydraError("Dataset must have an ID to be updated.")

    user_id = kwargs.get('user_id')

    dataset = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()
    #This dataset has been seen before, so it may be attached
    #to other scenarios, which may be locked. If they are locked, we must
    #not change their data, so new data must be created for the unlocked scenarios
    locked_scenarios = []
    unlocked_scenarios = []
    for dataset_rs in dataset.resourcescenarios:
        if dataset_rs.scenario.locked == 'Y':
            locked_scenarios.append(dataset_rs)
        else:
            unlocked_scenarios.append(dataset_rs)

    #Are any of these scenarios locked?
    if len(locked_scenarios) > 0:
        #If so, create a new dataset and assign it to all unlocked scenarios.
        dataset = add_dataset(data_type,
                                val,
                                units,
                                dimension,
                                metadata=metadata,
                                name=name,
                                user_id=kwargs['user_id'])
        for unlocked_rs in unlocked_scenarios:
            unlocked_rs.dataset = dataset

    else:

        dataset.set_val(data_type, val)

        dataset.set_metadata(metadata)

        dataset.data_type  = data_type
        dataset.data_units = units
        dataset.data_name  = name
        dataset.data_dimen = dimension
        dataset.created_by = kwargs['user_id']
        dataset.data_hash  = dataset.set_hash()

        #Is there a dataset in the DB already which is identical to the updated dataset?
        existing_dataset = DBSession.query(Dataset).filter(Dataset.data_hash==dataset.data_hash, Dataset.dataset_id != dataset.dataset_id).first()
        if existing_dataset is not None and existing_dataset.check_user(user_id):
            log.warn("An identical dataset %s has been found to dataset %s."
                     " Deleting dataset and returning dataset %s",
                     existing_dataset.dataset_id, dataset.dataset_id, existing_dataset.dataset_id)
            DBSession.delete(dataset)
            dataset = existing_dataset

    return dataset
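A hypothetical call to update_dataset (the dataset ID, values and units are illustrative only; user_id is passed through kwargs as the function expects):

updated = update_dataset(dataset_id=42,
                         name='Max Flow',
                         data_type='scalar',
                         val=120.5,
                         units='m^3 s^-1',
                         dimension='Volumetric flow rate',
                         metadata={'source': 'manual edit'},
                         user_id=1)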
Example #16
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id, source_scenario_id, target_scenario_id, **kwargs):
    """
        Using a resource attribute mapping, take the value from the source and apply
        it to the target. Both source and target scenarios must be specified (and therefor
        must exist).
    """
    rm = aliased(ResourceAttrMap, name='rm')
    #Check the mapping exists.
    mapping = DBSession.query(rm).filter(
        or_(
            and_(
                rm.resource_attr_id_a == source_resource_attr_id,
                rm.resource_attr_id_b == target_resource_attr_id
            ),
            and_(
                rm.resource_attr_id_a == target_resource_attr_id,
                rm.resource_attr_id_b == source_resource_attr_id
            )
        )
    ).first()

    if mapping is None:
        raise ResourceNotFoundError("Mapping between %s and %s not found"%
                                    (source_resource_attr_id,
                                     target_resource_attr_id))

    #check scenarios exist
    s1 = _get_scenario(source_scenario_id, False, False)
    s2 = _get_scenario(target_scenario_id, False, False)

    rs = aliased(ResourceScenario, name='rs')
    rs1 = DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id,
                                    rs.scenario_id == source_scenario_id).first()
    rs2 = DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id,
                                    rs.scenario_id == target_scenario_id).first()

    #3 possibilities worth considering:
    #1: Both RS exist, so update the target RS
    #2: Target RS does not exist, so create it with the dataset from RS1
    #3: Source RS does not exist, so it must be removed from the target scenario if it exists
    return_value = None  # Either return None or a new/updated resource scenario
    if rs1 is not None:
        if rs2 is not None:
            log.info("Destination Resource Scenario exists. Updating dastaset ID")
            rs2.dataset_id = rs1.dataset_id
        else:
            log.info("Destination has no data, so making a new Resource Scenario")
            rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id, scenario_id=target_scenario_id, dataset_id=rs1.dataset_id)
            DBSession.add(rs2)
        DBSession.flush()
        return_value = rs2
    else:
        log.info("Source Resource Scenario does not exist. Deleting destination Resource Scenario")
        if rs2 is not None:
            DBSession.delete(rs2)

    DBSession.flush()
    return return_value
Example #18
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id' to new unit
    ('to_unit'). Conversion ALWAYS creates a NEW dataset, so function
    returns the dataset ID of new dataset.
    """

    ds_i = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).one()

    dataset_type = ds_i.data_type

    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    if old_unit is not None:
        if dataset_type == 'scalar':
            new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
        elif dataset_type == 'array':
            dim = array_dim(dsval)
            vecdata = arr_to_vector(dsval)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            new_val = vector_to_arr(newvec, dim)
        elif dataset_type == 'timeseries':
            new_val = []
            for ts_time, ts_val in dsval.items():
                dim = array_dim(ts_val)
                vecdata = arr_to_vector(ts_val)
                newvec = hydra_units.convert(vecdata, old_unit, to_unit)
                newarr = vector_to_arr(newvec, dim)
                new_val.append((ts_time, newarr))
        elif dataset_type == 'descriptor':
            raise HydraError('Cannot convert descriptor.')

        new_dataset = Dataset()
        new_dataset.data_units = to_unit
        new_dataset.set_val(dataset_type, new_val)
        new_dataset.data_dimen = ds_i.data_dimen
        new_dataset.data_name = ds_i.data_name
        new_dataset.data_type = ds_i.data_type
        new_dataset.hidden = 'N'
        new_dataset.set_metadata(ds_i.get_metadata_as_dict())
        new_dataset.set_hash()

        existing_ds = DBSession.query(Dataset).filter(
            Dataset.data_hash == new_dataset.data_hash).first()

        if existing_ds is not None:
            DBSession.expunge_all()
            return existing_ds.dataset_id

        DBSession.add(new_dataset)
        DBSession.flush()

        return new_dataset.dataset_id

    else:
        raise HydraError('Dataset has no units.')
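A hypothetical call, assuming hydra_units can convert between the two units; because conversion always creates (or reuses) a dataset, only an ID is returned:

new_dataset_id = convert_dataset(42, 'Ml d^-1', user_id=1)
new_dataset = _get_dataset(new_dataset_id)  # see Example #45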
Example #19
def clone_dataset(dataset_id, **kwargs):
    """
        Get a single dataset, by ID
    """

    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None

    dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).options(
            joinedload_all('metadata')).first()

    if dataset is None:
        raise HydraError("Dataset %s does not exist." % (dataset_id))

    if dataset is not None and dataset.created_by != user_id:
        owner = DBSession.query(DatasetOwner).filter(
            DatasetOwner.dataset_id == Dataset.dataset_id,
            DatasetOwner.user_id == user_id).first()
        if owner is None:
            raise PermissionError(
                "User %s is not an owner of dataset %s and therefore cannot clone it."
                % (user_id, dataset_id))

    DBSession.expunge(dataset)

    make_transient(dataset)

    dataset.data_name = dataset.data_name + "(Clone)"
    dataset.dataset_id = None
    dataset.cr_date = None

    #Try to avoid duplicate metadata entries if the entry has been cloned previously
    for m in list(dataset.metadata):
        if m.metadata_name in ("clone_of", "cloned_by"):
            dataset.metadata.remove(m)

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "clone_of"
    cloned_meta.metadata_val = str(dataset_id)
    dataset.metadata.append(cloned_meta)
    cloned_meta = Metadata()
    cloned_meta.metadata_name = "cloned_by"
    cloned_meta.metadata_val = str(user_id)
    dataset.metadata.append(cloned_meta)

    dataset.set_hash()
    DBSession.add(dataset)
    DBSession.flush()

    cloned_dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset.dataset_id).first()

    return cloned_dataset
Example #21
def get_resource_data(ref_key, ref_id, scenario_id, type_id, **kwargs):
    """
        Get all the resource scenarios for a given resource
        in a given scenario. If type_id is specified, only
        return the resource scenarios for the attributes
        within the type.
    """

    user_id = kwargs.get('user_id')

    #This can be either a single ID or a list, so make them consistent
    if not isinstance(scenario_id, list):
        scenario_id = [scenario_id]

    resource_data_qry = DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == Dataset.dataset_id,
        ResourceAttr.resource_attr_id == ResourceScenario.resource_attr_id,
        ResourceScenario.scenario_id.in_(scenario_id),
        ResourceAttr.ref_key == ref_key,
        or_(ResourceAttr.network_id == ref_id, ResourceAttr.node_id == ref_id,
            ResourceAttr.link_id == ref_id,
            ResourceAttr.group_id == ref_id)).distinct().options(
                joinedload('resourceattr')).options(
                    joinedload_all('dataset.metadata'))

    if type_id is not None:
        attr_ids = []
        rs = DBSession.query(TypeAttr).filter(
            TypeAttr.type_id == type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_data_qry = resource_data_qry.filter(
            ResourceAttr.attr_id.in_(attr_ids))

    resource_data = resource_data_qry.all()

    for rs in resource_data:
        try:
            rs.dataset.value = zlib.decompress(rs.dataset.value)
        except zlib.error:
            pass

        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(user_id)
            except:
                rs.dataset.value = None
                rs.dataset.frequency = None
                rs.dataset.start_time = None

    DBSession.expunge_all()
    return resource_data
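The try/except around zlib.decompress implies dataset values may be stored zlib-compressed; the write side is not shown in these examples, but the round trip would look like this (a sketch):

import zlib

raw = b'1.0, 2.0, 3.0'
stored = zlib.compress(raw)             # what a write path would persist
assert zlib.decompress(stored) == raw   # what the loop above undoes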
Example #24
def get_datasets(dataset_ids,**kwargs):
    """
        Get a single dataset, by ID
    """

    user_id = int(kwargs.get('user_id'))
    datasets = []
    if len(dataset_ids) == 0:
        return []
    try:
        dataset_rs = DBSession.query(Dataset.dataset_id,
                Dataset.data_type,
                Dataset.data_units,
                Dataset.data_dimen,
                Dataset.data_name,
                Dataset.hidden,
                Dataset.cr_date,
                Dataset.created_by,
                DatasetOwner.user_id,
                null().label('metadata'),
                #user_id == None renders as SQL "IS NULL", which is true only
                #when the outer join found no owner row for the requesting user.
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.start_time).label('start_time'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.frequency).label('frequency'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.value).label('value')).filter(
                Dataset.dataset_id.in_(dataset_ids)).outerjoin(DatasetOwner,
                                    and_(DatasetOwner.dataset_id==Dataset.dataset_id,
                                    DatasetOwner.user_id==user_id)).all()

        #convert the value row into a string as it is returned as a binary
        for dataset_row in dataset_rs:
            dataset_dict = dataset_row._asdict()

            if dataset_row.value is not None:
                dataset_dict['value'] = str(dataset_row.value)

            if dataset_row.hidden == 'N' or (dataset_row.hidden == 'Y' and dataset_row.user_id is not None):
                metadata = DBSession.query(Metadata).filter(Metadata.dataset_id == dataset_row.dataset_id).all()
                dataset_dict['metadata'] = metadata
            else:
                dataset_dict['metadata'] = []

            datasets.append(namedtuple('Dataset', dataset_dict.keys())(**dataset_dict))

            
    except NoResultFound:
        raise ResourceNotFoundError("Datasets not found.")

    return datasets
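The outer join plus case() above masks start_time, frequency and value whenever a dataset is hidden and the requesting user is not an owner. The same rule expressed in plain Python, for clarity:

def mask_hidden(value, hidden, owner_user_id):
    #owner_user_id is non-None only when the outer join matched,
    #i.e. the requesting user owns the dataset.
    if hidden == 'Y' and owner_user_id is None:
        return None
    return value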
Example #25
def get_all_resource_attributes(ref_key,
                                network_id,
                                template_id=None,
                                **kwargs):
    """
        Get all the resource attributes for a given resource type in the network.
        That includes all the resource attributes for a given type within the network.
        For example, if the ref_key is 'NODE', then it will return all the attirbutes
        of all nodes in the network. This function allows a front end to pre-load an entire
        network's resource attribute information to reduce on function calls.
        If type_id is specified, only
        return the resource attributes within the type.
    """

    user_id = kwargs.get('user_id')

    resource_attr_qry = DBSession.query(ResourceAttr).\
            outerjoin(Node, Node.node_id==ResourceAttr.node_id).\
            outerjoin(Link, Link.link_id==ResourceAttr.link_id).\
            outerjoin(ResourceGroup, ResourceGroup.group_id==ResourceAttr.group_id).filter(
        ResourceAttr.ref_key == ref_key,
        or_(
            and_(ResourceAttr.node_id != None,
                 ResourceAttr.node_id == Node.node_id,
                 Node.network_id == network_id),

            and_(ResourceAttr.link_id != None,
                 ResourceAttr.link_id == Link.link_id,
                 Link.network_id == network_id),

            and_(ResourceAttr.group_id != None,
                 ResourceAttr.group_id == ResourceGroup.group_id,
                 ResourceGroup.network_id == network_id)
        ))

    if template_id is not None:
        attr_ids = []
        rs = DBSession.query(TypeAttr).join(
            TemplateType, TemplateType.type_id == TypeAttr.type_id).filter(
                TemplateType.template_id == template_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_attr_qry = resource_attr_qry.filter(
            ResourceAttr.attr_id.in_(attr_ids))

    resource_attrs = resource_attr_qry.all()

    return resource_attrs
Example #26
def get_dataset(dataset_id,**kwargs):
    """
        Get a single dataset, by ID
    """

    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None
    try:
        dataset_rs = DBSession.query(Dataset.dataset_id,
                Dataset.data_type,
                Dataset.data_units,
                Dataset.data_dimen,
                Dataset.data_name,
                Dataset.hidden,
                Dataset.cr_date,
                Dataset.created_by,
                DatasetOwner.user_id,
                null().label('metadata'),
                #user_id == None (SQL "IS NULL"): hide the details only when the
                #requesting user does not own the hidden dataset.
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.start_time).label('start_time'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.frequency).label('frequency'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
                        else_=Dataset.value).label('value')).filter(
                Dataset.dataset_id==dataset_id).outerjoin(DatasetOwner,
                                    and_(DatasetOwner.dataset_id==Dataset.dataset_id,
                                    DatasetOwner.user_id==user_id)).one()

        rs_dict = dataset_rs._asdict()

        #convert the value row into a string as it is returned as a binary
        if dataset_rs.value is not None:
            rs_dict['value'] = str(dataset_rs.value)

        if dataset_rs.hidden == 'N' or (dataset_rs.hidden == 'Y' and dataset_rs.user_id is not None):
            metadata = DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_id).all()
            rs_dict['metadata'] = metadata
        else:
            rs_dict['metadata'] = []

    except NoResultFound:
        raise HydraError("Dataset %s does not exist."%(dataset_id))


    dataset = namedtuple('Dataset', rs_dict.keys())(**rs_dict)
    
    return dataset
Example #27
def add_attribute(attr, **kwargs):
    """
    Add a generic attribute, which can then be used in creating
    a resource attribute, and put into a type.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    """
    log.debug("Adding attribute: %s", attr.name)

    if attr.dimen is None or attr.dimen.lower() == 'dimensionless':
        log.info("Setting 'dimensionless' on attribute %s", attr.name)
        attr.dimen = 'dimensionless'

    try:
        attr_i = DBSession.query(Attr).filter(
            Attr.attr_name == attr.name, Attr.attr_dimen == attr.dimen).one()
        log.info("Attr already exists")
    except NoResultFound:
        attr_i = Attr(attr_name=attr.name, attr_dimen=attr.dimen)
        attr_i.attr_description = attr.description
        DBSession.add(attr_i)
        DBSession.flush()
        log.info("New attr added")
    return attr_i
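A hypothetical caller, assuming a simple object carrying the name/dimen/description fields shown in the docstring's (Attr) sketch:

class AttrSpec(object):
    #Stand-in for the complex-model Attr object the service layer passes in.
    def __init__(self, name, dimen=None, description=None):
        self.name = name
        self.dimen = dimen
        self.description = description

attr_i = add_attribute(AttrSpec('Test Attr', dimen='Volume',
                                description='An illustrative attribute'))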
Example #28
def get_username(uid, **kwargs):
    #.first() returns None when no row matches, so the check below is reachable.
    rs = DBSession.query(User.username).filter(User.user_id==uid).first()

    if rs is None:
        raise ResourceNotFoundError("User with ID %s not found"%uid)

    return rs.username
Example #29
def get_link_mappings(link_id, link_2_id=None, **kwargs):
    """
        Get all the resource attribute mappings in a network. If another network
        is specified, only return the mappings between the two networks.
    """
    qry = DBSession.query(ResourceAttrMap).filter(
        or_(
            and_(
                ResourceAttrMap.resource_attr_id_a ==
                ResourceAttr.resource_attr_id,
                ResourceAttr.link_id == link_id),
            and_(
                ResourceAttrMap.resource_attr_id_b ==
                ResourceAttr.resource_attr_id,
                ResourceAttr.link_id == link_id)))

    if link_2_id is not None:
        aliased_ra = aliased(ResourceAttr, name="ra2")
        qry = qry.filter(
            or_(
                and_(
                    ResourceAttrMap.resource_attr_id_a ==
                    aliased_ra.resource_attr_id,
                    aliased_ra.link_id == link_2_id),
                and_(
                    ResourceAttrMap.resource_attr_id_b ==
                    aliased_ra.resource_attr_id,
                    aliased_ra.link_id == link_2_id)))

    return qry.all()
Example #30
def update_role(role,**kwargs):
    """
        Update the role.
        Used to add permissions and users to a role.
    """
    check_perm(kwargs.get('user_id'), 'edit_role')
    try:
        role_i = DBSession.query(Role).filter(Role.role_id==role.id).one()
        role_i.role_name = role.name
        role_i.role_code = role.code
    except NoResultFound:
        raise ResourceNotFoundError("Role (role_id=%s) does not exist"%(role.id))

    for perm in role.permissions:
        _get_perm(perm.id)
        roleperm_i = RolePerm(role_id=role.id, perm_id=perm.id)

        DBSession.add(roleperm_i)

    for user in role.users:
        _get_user(user.id)
        roleuser_i = RoleUser(role_id=role.id, user_id=user.id)

        DBSession.add(roleuser_i)

    DBSession.flush()
    return role_i
Example #31
def _get_role(role_id,**kwargs):
    try:
        role_i = DBSession.query(Role).filter(Role.role_id==role_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Role %s does not exist"%role_id)

    return role_i
Example #33
def _add_resourcegroupitem(group_item, scenario_id):
    """
        Add a single resource group item (no DB flush, as it's an internal function)
    """
    if group_item.id and group_item.id > 0:
        try:
            group_item_i = DBSession.query(ResourceGroupItem).filter(
                ResourceGroupItem.item_id == group_item.id).one()
        except NoResultFound:
            raise ResourceNotFoundError("ResourceGroupItem %s not found" %
                                        (group_item.id))

    else:
        group_item_i = ResourceGroupItem()
        group_item_i.group_id = group_item.group_id
        if scenario_id is not None:
            group_item_i.scenario_id = scenario_id

    ref_key = group_item.ref_key
    group_item_i.ref_key = ref_key
    if ref_key == 'NODE':
        group_item_i.node_id = group_item.ref_id
    elif ref_key == 'LINK':
        group_item_i.link_id = group_item.ref_id
    elif ref_key == 'GROUP':
        group_item_i.subgroup_id = group_item.ref_id
    DBSession.add(group_item_i)
    return group_item_i
Example #34
def add_resource_attribute(resource_type, resource_id, attr_id, is_var,**kwargs):
    """
        Add a resource attribute to a resource.

        attr_is_var indicates whether the attribute is a variable or not --
        this is used in simulation to indicate that this value is expected
        to be filled in by the simulator.
    """

    attr = DBSession.query(Attr).filter(Attr.attr_id==attr_id).first()

    if attr is None:
        raise ResourceNotFoundError("Attribute with ID %s does not exist."%attr_id)

    resource_i = _get_resource(resource_type, resource_id)

    for ra in resource_i.attributes:
        if ra.attr_id == attr_id:
            raise HydraError("Duplicate attribute. %s %s already has attribute %s"
                             %(resource_type, resource_i.get_name(), attr.attr_name))

    attr_is_var = 'Y' if is_var else 'N'

    new_ra = resource_i.add_attribute(attr_id, attr_is_var)
    DBSession.flush()

    return new_ra
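A hypothetical call, adding a non-variable attribute to a node (the IDs are illustrative):

new_ra = add_resource_attribute('NODE', 123, attr_id=456, is_var=False, user_id=1)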
Example #35
def bulk_update_resourcedata(scenario_ids, resource_scenarios,**kwargs):
    """
        Update the data associated with a list of scenarios.
    """
    user_id = kwargs.get('user_id')
    res = {}

    net_ids = DBSession.query(Scenario.network_id).filter(Scenario.scenario_id.in_(scenario_ids)).all()

    if len(set(net_ids)) != 1:
        raise HydraError("Scenario IDs are not in the same network")

    for scenario_id in scenario_ids:
        _check_can_edit_scenario(scenario_id, kwargs['user_id'])

        scen_i = _get_scenario(scenario_id, False, False)
        res[scenario_id] = []
        for rs in resource_scenarios:
            if rs.value is not None:
                updated_rs = _update_resourcescenario(scen_i, rs, user_id=user_id, source=kwargs.get('app_name'))
                res[scenario_id].append(updated_rs)
            else:
                _delete_resourcescenario(scenario_id, rs)

        DBSession.flush()

    return res
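Example #37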
def _get_links(link_ids):

    qry = DBSession.query(Link).filter(
                        Link.link_id.in_(link_ids),
                    )

    return qry.all()
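Example #38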
def _get_groups(group_ids):

    qry = DBSession.query(ResourceGroup).filter(
                        ResourceGroup.group_id.in_(group_ids),
                    )

    return qry.all()
Example #40
def _get_network(network_id):
    try:
        net_i = DBSession.query(Network).filter(
            Network.network_id == network_id).one()
        return net_i
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found" % (network_id))
Example #41
def get_network_mappings(network_id, network_2_id=None, **kwargs):
    """
        Get all the mappings of network resource attributes, NOT ALL THE MAPPINGS
        WITHIN A NETWORK. For that, ``use get_mappings_in_network``. If another network
        is specified, only return the mappings between the two networks.
    """
    qry = DBSession.query(ResourceAttrMap).filter(
        or_(
            and_(
                ResourceAttrMap.resource_attr_id_a == ResourceAttr.resource_attr_id,
                ResourceAttr.network_id == network_id), 
            and_(
                ResourceAttrMap.resource_attr_id_b == ResourceAttr.resource_attr_id,
                ResourceAttr.network_id == network_id)))

    if network_2_id is not None:
        aliased_ra = aliased(ResourceAttr, name="ra2")
        qry = qry.filter(or_(
            and_(
                ResourceAttrMap.resource_attr_id_a == aliased_ra.resource_attr_id,
                aliased_ra.network_id == network_2_id), 
            and_(
                ResourceAttrMap.resource_attr_id_b == aliased_ra.resource_attr_id,
                aliased_ra.network_id == network_2_id)))
    
    return qry.all()
Example #43
def get_dataset_collection_by_name(collection_name,**kwargs):
    try:
        collection = DBSession.query(DatasetCollection).filter(DatasetCollection.collection_name==collection_name).one()
    except NoResultFound:
        raise ResourceNotFoundError("No dataset collection found with name %s"%collection_name)

    return collection
Example #44
    def get_all_resource_attr_collections(ctx):
        """
            Get all resource attribute collections
        """
        collections_i = DBSession.query(ResourceAttrCollection).all()

        return [HydraResourceAttrCollection(collection_i) for collection_i in collections_i] 
Example #45
def _get_dataset(dataset_id):
    try:
        dataset_i = DBSession.query(Dataset).filter(
            Dataset.dataset_id == dataset_id).one()
        return dataset_i
    except NoResultFound:
        raise ResourceNotFoundError("Dataset %s not found" % (dataset_id))
Example #47
def _get_user(user_id, **kwargs):
    try:
        user_i = DBSession.query(User).filter(User.user_id==user_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("User %s does not exist"%user_id)

    return user_i
Example #48
def _get_attr(attr_id):
    try:
        attr = DBSession.query(Attr).filter(Attr.attr_id == attr_id).one()
        return attr
    except NoResultFound:
        raise ResourceNotFoundError("Attribute with ID %s not found" %
                                    (attr_id, ))
Example #49
def _get_perm(perm_id,**kwargs):
    try:
        perm_i = DBSession.query(Perm).filter(Perm.perm_id==perm_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Permission %s does not exist"%perm_id)

    return perm_i
Example #50
def delete_dataset(dataset_id,**kwargs):
    """
        Removes a piece of data from the DB.
        CAUTION! Use with care, as this cannot be undone easily.
    """
    try:
        d = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()
    except NoResultFound:
        raise HydraError("Dataset %s does not exist."%dataset_id)

    dataset_rs = DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==dataset_id).all()
    if len(dataset_rs) > 0:
        raise HydraError("Cannot delete %s. Dataset is used by resource scenarios."%dataset_id)

    DBSession.delete(d)
    DBSession.flush()
Example #52
def check_attr_dimension(attr_id, **kwargs):
    """
        Check that the dimension of the resource attribute data is consistent
        with the definition of the attribute.
        If the attribute says 'volume', make sure every dataset connected
        with this attribute via a resource attribute also has a dimension
        of 'volume'.
    """
    attr_i = _get_attr(attr_id)

    datasets = DBSession.query(Dataset).filter(
        Dataset.dataset_id == ResourceScenario.dataset_id,
        ResourceScenario.resource_attr_id == ResourceAttr.resource_attr_id,
        ResourceAttr.attr_id == attr_id).all()
    bad_datasets = []
    for d in datasets:
        if d.data_dimen != attr_i.attr_dimen:
            bad_datasets.append(d.dataset_id)

    if len(bad_datasets) > 0:
        raise HydraError(
            "Datasets %s have a different dimension to attribute %s" %
            (bad_datasets, attr_id))

    return 'OK'
Example #54
def get_dataset_scenarios(dataset_id, **kwargs):
    try:
        DBSession.query(Dataset).filter(Dataset.dataset_id == dataset_id).one()
    except NoResultFound:
        raise ResourceNotFoundError("Dataset %s not found" % dataset_id)

    log.info("dataset %s exists", dataset_id)

    scenarios = DBSession.query(Scenario).filter(
        Scenario.status == 'A',
        ResourceScenario.scenario_id == Scenario.scenario_id,
        ResourceScenario.dataset_id == dataset_id).distinct().all()

    log.info("%s scenarios retrieved", len(scenarios))

    return scenarios
Example #55
def get_scenario_data(scenario_id, **kwargs):
    """
        Get all the datasets from the group with the specified name
        @returns a list of dictionaries
    """
    user_id = kwargs.get('user_id')

    scenario_data = DBSession.query(Dataset).filter(
        Dataset.dataset_id == ResourceScenario.dataset_id,
        ResourceScenario.scenario_id == scenario_id).options(
            joinedload_all('metadata')).distinct().all()

    for sd in scenario_data:
        if sd.hidden == 'Y':
            try:
                sd.check_read_permission(user_id)
            except:
                sd.value = None
                sd.frequency = None
                sd.start_time = None
                sd.metadata = []

    DBSession.expunge_all()

    log.info("Retrieved %s datasets", len(scenario_data))
    return scenario_data
Example #56
def _get_template(template_id):
    try:
        tmpl_i = DBSession.query(Template).filter(
            Template.template_id == template_id).one()
        return tmpl_i
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found" % (template_id))
Example #57
def _get_project(project_id):
    try:
        project = DBSession.query(Project).filter(
            Project.project_id == project_id).one()
        return project
    except NoResultFound:
        raise ResourceNotFoundError("Project %s not found" % (project_id))
Example #58
def get_networks(project_id,
                 include_resources=True,
                 summary=False,
                 include_data='N',
                 **kwargs):
    """
        Get all networks in a project
        Returns an array of network objects.
    """
    log.info("Getting networks for project %s", project_id)
    user_id = kwargs.get('user_id')
    project = _get_project(project_id)
    project.check_read_permission(user_id)

    rs = DBSession.query(
        Network.network_id,
        Network.status).filter(Network.project_id == project_id).all()
    networks = []
    for r in rs:
        if r.status != 'A':
            continue
        try:
            net = network.get_network(r.network_id,
                                      include_resources=include_resources,
                                      summary=summary,
                                      include_data=include_data,
                                      **kwargs)
            log.info("Network %s retrieved", net.network_name)
            networks.append(net)
        except PermissionError:
            log.info("Not returning network %s as user %s does not have "
                     "permission to read it." % (r.network_id, user_id))

    return networks
Example #59
def get_rules(scenario_id, **kwargs):
    """
        Get all the rules for a given scenario.
    """
    rules = DBSession.query(Rule).filter(Rule.scenario_id==scenario_id, Rule.status=='A').all()

    return rules
Example #60
def assign_value(rs, data_type, val,
                 units, name, dimension, metadata={}, data_hash=None, user_id=None, source=None):
    """
        Insert or update a piece of data in a scenario.
        If the dataset is being shared by other resource scenarios, a new dataset is inserted.
        If the dataset is ONLY being used by the resource scenario in question, the dataset
        is updated to avoid unnecessary duplication.
    """

    log.debug("Assigning value %s to rs %s in scenario %s",
              name, rs.resource_attr_id, rs.scenario_id)

    if rs.scenario.locked == 'Y':
        raise PermissionError("Cannot assign value. Scenario %s is locked"
                             %(rs.scenario_id))

    #Check if this RS is the only RS in the DB connected to this dataset.
    #If no result is found, the RS isn't in the DB yet, so the condition is false.
    update_dataset = False # Default behaviour is to create a new dataset.

    if rs.dataset is not None:

        #Has this dataset changed?
        if rs.dataset.data_hash == data_hash:
            log.debug("Dataset has not changed. Returning.")
            return

        connected_rs = DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==rs.dataset.dataset_id).all()
        #If there's no RS found, then the incoming rs is new, so the dataset can be altered
        #without fear of affecting something else.
        if len(connected_rs) == 0:
            update_dataset = True
        elif len(connected_rs) == 1:
            #The RS exists in the DB but is the only one using this dataset, or
            #the RS isn't in the DB yet and the dataset is used by one other RS.
            if connected_rs[0].scenario_id == rs.scenario_id and connected_rs[0].resource_attr_id == rs.resource_attr_id:
                update_dataset = True
        else:
            update_dataset = False

    if update_dataset is True:
        log.info("Updating dataset '%s'", name)
        dataset = data.update_dataset(rs.dataset.dataset_id, name, data_type, val, units, dimension, metadata, **dict(user_id=user_id))
        rs.dataset = dataset
        rs.dataset_id = dataset.dataset_id
    else:
        log.info("Creating new dataset %s in scenario %s", name, rs.scenario_id)
        dataset = data.add_dataset(data_type,
                                val,
                                units,
                                dimension,
                                metadata=metadata,
                                name=name,
                                **dict(user_id=user_id))
        rs.dataset = dataset
        rs.source  = source

    DBSession.flush()
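The in-place-update rule above, distilled into a standalone predicate (a sketch; should_update_in_place is a hypothetical name):

def should_update_in_place(connected_rs, rs):
    if len(connected_rs) == 0:
        return True   # dataset not referenced by any stored RS yet
    if len(connected_rs) == 1:
        only = connected_rs[0]
        # safe only when the single consumer is this very resource scenario
        return (only.scenario_id == rs.scenario_id and
                only.resource_attr_id == rs.resource_attr_id)
    return False      # shared dataset: never mutate in place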