def get_group_dataset_matrix(ctx, group_ids, attribute_ids, scenario_ids):
    """
        Return one GroupDatasetMatrix per scenario for the requested
        groups and attributes. A group without the attribute, or with
        the attribute but no dataset, yields None in the matrix.
    """
    # All three ID lists must be non-empty for the matrix to be defined.
    if not scenario_ids:
        raise HydraError("No scenarios specified!")
    if not attribute_ids:
        raise HydraError("No attributes specified!")
    if not group_ids:
        raise HydraError("No resources specified")

    groups = _get_groups(group_ids)
    resource_attrs = _get_resource_attributes('GROUP', group_ids,
                                              attribute_ids)
    data = _get_data('GROUP', group_ids, attribute_ids, scenario_ids)

    # Group the raw data by scenario before building the matrices.
    scenario_data = get_attr_dict('GROUP', scenario_ids, group_ids,
                                  attribute_ids, groups, resource_attrs,
                                  data)

    return [GroupDatasetMatrix(s_id, s_data)
            for s_id, s_data in scenario_data.items()]
示例#2
0
def get_resourcescenarios(resource_attr_ids, scenario_ids, **kwargs):
    """
        Retrieve all the datasets in a scenario for a given attribute.
        Also return the resource attributes so there is a reference to the node/link

        Raises HydraError if any of the given resource attribute IDs or
        scenario IDs do not exist.
    """

    #Make sure the resource_attr_ids are valid
    check_ra_qry = DBSession.query(ResourceAttr).filter(
        ResourceAttr.resource_attr_id.in_(resource_attr_ids)).all()
    if len(check_ra_qry) != len(resource_attr_ids):
        # BUG FIX: "attribues" typo corrected.
        raise HydraError(
            "Unrecognised resource attributes %s were found in list" %
            (resource_attr_ids, ))

    #Make sure the scenario ids are valid
    scen_qry = DBSession.query(Scenario).filter(
        Scenario.scenario_id.in_(scenario_ids)).all()
    if len(scen_qry) != len(scenario_ids):
        # BUG FIX: this message was a copy-paste of the resource-attribute
        # one; it must refer to the scenario IDs being checked here.
        raise HydraError(
            "Unrecognised scenarios %s were found in list" %
            (scenario_ids, ))

    rs_result = DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id.in_(scenario_ids),
        ResourceScenario.resource_attr_id.in_(resource_attr_ids)).all()

    return rs_result
    def get_link_dataset_matrix(ctx, link_ids, attribute_ids, scenario_ids):
        """
            Return one LinkDatasetMatrix per scenario for the requested
            links and attributes. A link without the attribute, or with
            the attribute but no dataset, yields None in the matrix.
        """
        # All three ID lists must be non-empty for the matrix to be defined.
        if not scenario_ids:
            raise HydraError("No scenarios specified!")
        if not attribute_ids:
            raise HydraError("No attributes specified!")
        if not link_ids:
            raise HydraError("No resources specified")

        links = _get_links(link_ids)
        resource_attrs = _get_resource_attributes('LINK', link_ids,
                                                  attribute_ids)
        data = _get_data('LINK', link_ids, attribute_ids, scenario_ids)

        # Group the raw data by scenario before building the matrices.
        scenario_data = get_attr_dict('LINK', scenario_ids, link_ids,
                                      attribute_ids, links, resource_attrs,
                                      data)

        return [LinkDatasetMatrix(s_id, s_data)
                for s_id, s_data in scenario_data.items()]
    def get_node_dataset_matrix(ctx, node_ids, attribute_ids, scenario_ids):
        """
            Return one NodeDatasetMatrix per scenario for the requested
            nodes and attributes. A node without the attribute, or with
            the attribute but no dataset, yields None in the matrix.
        """
        # All three ID lists must be non-empty for the matrix to be defined.
        if not scenario_ids:
            raise HydraError("No scenarios specified!")
        if not attribute_ids:
            raise HydraError("No attributes specified!")
        if not node_ids:
            raise HydraError("No resources specified")

        nodes = _get_nodes(node_ids)
        resource_attrs = _get_resource_attributes('NODE', node_ids,
                                                  attribute_ids)
        data = _get_data('NODE', node_ids, attribute_ids, scenario_ids)

        # Group the raw data by scenario before building the matrices.
        scenario_data = get_attr_dict('NODE', scenario_ids, node_ids,
                                      attribute_ids, nodes, resource_attrs,
                                      data)

        return [NodeDatasetMatrix(s_id, s_data)
                for s_id, s_data in scenario_data.items()]
示例#5
0
def update_rule(rule, **kwargs):
    """
        Update an existing rule from the given rule object.

        The rule's ref_key (NETWORK/NODE/LINK/GROUP) cannot change;
        attempting to do so raises HydraError, as does an unrecognised
        ref_key. Returns the updated DB rule.
    """
    rule_i = _get_rule(rule.id)

    if rule.ref_key != rule_i.ref_key:
        raise HydraError("Cannot convert a %s rule to a %s rule. Please create a new rule instead."%(rule_i.ref_key, rule.ref_key))

    if rule.ref_key == 'NETWORK':
        rule_i.network_id = rule.ref_id
    elif rule.ref_key == 'NODE':
        rule_i.node_id = rule.ref_id
    elif rule.ref_key == 'LINK':
        rule_i.link_id = rule.ref_id
    elif rule.ref_key == 'GROUP':
        # BUG FIX: previously read rule.group_id; every other branch takes
        # the target from rule.ref_id, which is the ref_key/ref_id contract.
        rule_i.group_id = rule.ref_id
    else:
        # BUG FIX: the %s placeholder previously had no argument supplied.
        raise HydraError("Ref Key %s not recognised." % rule.ref_key)

    rule_i.scenario_id = rule.scenario_id
    rule_i.rule_name   = rule.name
    rule_i.rule_description = rule.description

    rule_i.rule_text = rule.text

    DBSession.flush()

    return rule_i
示例#6
0
def get_vals_between_times(dataset_id, start_time, end_time, timestep, increment, **kwargs):
    """
        Retrieve data between two specified times within a timeseries. The times
        need not be specified in the timeseries. This function will 'fill in the blanks'.

        Two types of data retrieval can be done.

        If the timeseries is timestamp-based, then start_time and end_time
        must be datetimes and timestep must be specified (minutes, seconds etc).
        'increment' reflects the size of the timestep -- timestep = 'minutes' and increment = 2
        means 'every 2 minutes'.

        If the timeseries is float-based (relative), then start_time and end_time
        must be decimal values. timestep is ignored and 'increment' represents the increment
        to be used between the start and end.
        Ex: start_time = 1, end_time = 5, increment = 1 will get times at 1, 2, 3, 4, 5

        Returns a dict {'data': <JSON-encoded list of values>}.
    """
    try:
        server_start_time = get_datetime(start_time)
        server_end_time   = get_datetime(end_time)

        # Hoisted out of the loop (was re-checked every iteration): a zero
        # increment would otherwise never terminate the loop below.
        if server_start_time < server_end_time and int(increment) == 0:
            raise HydraError("%s is not a valid increment for this search."%increment)

        times = [server_start_time]
        next_time = server_start_time
        while next_time < server_end_time:
            next_time = next_time + datetime.timedelta(**{timestep:int(increment)})
            times.append(next_time)
    except ValueError:
        # The bounds did not parse as datetimes -- treat them as decimals.
        try:
            server_start_time = Decimal(start_time)
            server_end_time   = Decimal(end_time)
            times = [server_start_time]

            next_time = server_start_time
            while next_time < server_end_time:
                next_time = next_time + increment
                times.append(next_time)
        except (TypeError, ValueError, ArithmeticError):
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit. Decimal conversion failures
            # raise InvalidOperation, an ArithmeticError subclass.
            raise HydraError("Unable to get times. Please check to and from times.")

    td = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()
    log.debug("Number of times to fetch: %s", len(times))
    data = td.get_val(timestamp=times)

    # get_val may return a list of rows, a single value, or None.
    data_to_return = []
    if isinstance(data, list):
        data_to_return = [list(d) for d in data if d is not None]
    elif data is not None:
        data_to_return.append(data)

    return {'data' : json.dumps(data_to_return)}
示例#7
0
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('to_unit'). Conversion ALWAYS creates a NEW dataset, so the function
    returns the dataset ID of the new dataset (or of an identical existing
    one, if the converted data already exists in the DB).

    Raises HydraError for descriptor datasets, unknown data types and
    datasets without units.
    """

    ds_i = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).one()

    dataset_type = ds_i.data_type

    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    if old_unit is None:
        raise HydraError('Dataset has no units.')

    if dataset_type == 'scalar':
        new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
    elif dataset_type == 'array':
        dim = array_dim(dsval)
        vecdata = arr_to_vector(dsval)
        newvec = hydra_units.convert(vecdata, old_unit, to_unit)
        new_val = vector_to_arr(newvec, dim)
    elif dataset_type == 'timeseries':
        # Convert each timestep's array individually, keeping its time.
        new_val = []
        for ts_time, ts_val in dsval.items():
            dim = array_dim(ts_val)
            vecdata = arr_to_vector(ts_val)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            newarr = vector_to_arr(newvec, dim)
            # BUG FIX: list.append takes one argument; this was previously
            # new_val.append(ts_time, newarr), a guaranteed TypeError.
            new_val.append([ts_time, newarr])
    elif dataset_type == 'descriptor':
        raise HydraError('Cannot convert descriptor.')
    else:
        # BUG FIX: an unknown type previously fell through and crashed
        # later with NameError on 'new_val'.
        raise HydraError("Invalid data type %s" % (dataset_type, ))

    new_dataset = Dataset()
    new_dataset.data_units = to_unit
    new_dataset.set_val(dataset_type, new_val)
    new_dataset.data_dimen = ds_i.data_dimen
    new_dataset.data_name = ds_i.data_name
    new_dataset.data_type = ds_i.data_type
    new_dataset.hidden = 'N'
    new_dataset.set_metadata(ds_i.get_metadata_as_dict())
    new_dataset.set_hash()

    # Deduplicate: if an identical dataset already exists, return its ID.
    existing_ds = DBSession.query(Dataset).filter(
        Dataset.data_hash == new_dataset.data_hash).first()

    if existing_ds is not None:
        DBSession.expunge_all()
        return existing_ds.dataset_id

    DBSession.add(new_dataset)
    DBSession.flush()

    return new_dataset.dataset_id
示例#8
0
def login_user(username, password):
    """
        Check the given credentials and return the matching user's ID,
        updating the user's last_login timestamp on success.

        Raises HydraError (carrying the username) when the user does not
        exist or the password does not match.
    """
    try:
        user_i = DBSession.query(User).filter(User.username == username).one()
    except NoResultFound:
        raise HydraError(username)

    stored = user_i.password.encode('utf-8')
    # bcrypt.hashpw with the stored hash as salt reproduces the stored
    # hash only when the password is correct.
    if bcrypt.hashpw(password.encode('utf-8'), stored) != stored:
        raise HydraError(username)

    user_i.last_login = datetime.datetime.now()
    return user_i.user_id
示例#9
0
    def login(ctx, username, password):
        """
            Authenticate the given credentials via login_user.

            Both username and password are required and are UTF-8 encoded
            before being passed on. Any HydraError raised during validation
            or authentication is re-raised as AuthenticationError.

            NOTE(review): the successful user_id is computed but never
            returned -- confirm whether callers expect a return value.
        """
        try:

            if username is None:
                raise HydraError("No Username specified.")
            username = username.encode('utf-8')

            if password is None:
                raise HydraError("No password specified")
            password = password.encode('utf-8')

            user_id = login_user(username, password)
        except HydraError, e:
            raise AuthenticationError(e)
示例#10
0
def _get_db_val(data_type, val):
    if data_type in ('descriptor', 'scalar'):
        return str(val)
    elif data_type in ('timeseries', 'array'):
        return val
    else:
        raise HydraError("Invalid data type %s" % (data_type, ))
示例#11
0
    def update_unit(self, dimension, unit):
        """Update a unit in the custom file. Please note that units in the
        built-in file can not be updated.

        ``unit`` is a dict with keys 'name', 'abbr', 'lf', 'cf' and
        optionally 'info'. Returns True when the matching dimension element
        was found and rewritten in the user XML tree, False when it was not.
        Raises HydraError when the dimension is unknown or the unit is not a
        user-defined unit.

        NOTE(review): the unit's abbreviation is appended to
        self.dimensions[dimension] / dimensions_full even on update, which
        looks like it would create duplicate list entries -- confirm.
        """
        if dimension in self.dimensions.keys() and \
                unit['abbr'] in self.userunits:

            # update internal variables
            self.dimensions[dimension].append(unit['abbr'])
            self.dimensions_full[dimension].append(unit)
            self.units.update({unit['abbr']:
                               (float(unit['lf']), float(unit['cf']))})
            self.unit_description.update({unit['abbr']: unit['name']})
            # Update XML tree
            if 'info' not in unit.keys() or unit['info'] is None:
                unit['info'] = ''
            # Locate the <dimension> element holding this unit.
            element_index = None
            for i, element in enumerate(self.usertree):
                if element.get('name') == dimension:
                    element_index = i
                    break
            if element_index is not None:
                # Drop any existing element for this abbreviation, then
                # append a freshly-built one with the updated values.
                for unit_element in self.usertree[element_index]:
                    if unit_element.get('abbr') == unit['abbr']:
                        self.usertree[element_index].remove(unit_element)
                self.usertree[element_index].append(
                    etree.Element('unit', name=unit['name'], abbr=unit['abbr'],
                                  lf=str(unit['lf']), cf=str(unit['cf']),
                                  info=unit['info']))
                return True
            else:
                return False
        else:
            raise HydraError('Unit %s with dimension %s not found.'%(unit,dimension))
示例#12
0
def bulk_update_resourcedata(scenario_ids, resource_scenarios, **kwargs):
    """
        Update the data associated with a list of scenarios.

        All scenarios must belong to the same network. A resource scenario
        with a None value is deleted from each scenario instead of updated.
        Returns a dict mapping scenario_id -> list of updated resource
        scenarios.
    """
    user_id = kwargs.get('user_id')

    # FIX: removed dead 'res = None' assignment that was immediately
    # overwritten.
    res = {}

    net_ids = DBSession.query(Scenario.network_id).filter(
        Scenario.scenario_id.in_(scenario_ids)).all()

    if len(set(net_ids)) != 1:
        raise HydraError("Scenario IDS are not in the same network")

    for scenario_id in scenario_ids:
        _check_can_edit_scenario(scenario_id, kwargs['user_id'])

        scen_i = _get_scenario(scenario_id, False, False)
        res[scenario_id] = []
        for rs in resource_scenarios:
            if rs.value is not None:
                updated_rs = _update_resourcescenario(
                    scen_i, rs, user_id=user_id, source=kwargs.get('app_name'))
                res[scenario_id].append(updated_rs)
            else:
                _delete_resourcescenario(scenario_id, rs)

        DBSession.flush()

    return res
示例#13
0
def hide_dataset(dataset_id, exceptions, read, write, share, **kwargs):
    """
        Hide a particular piece of data so it can only be seen by its owner.
        Only an owner can hide (and unhide) data; data with no owner
        cannot be hidden.

        'exceptions' lists usernames still permitted to access the data;
        read, write and share indicate the access those users retain.
    """

    user_id = kwargs.get('user_id')
    dataset_i = _get_dataset(dataset_id)

    # Only the creator of the dataset may hide it.
    if int(user_id) != dataset_i.created_by:
        raise HydraError('Permission denied. '
                         'User %s is not the owner of dataset %s' %
                         (user_id, dataset_i.data_name))

    dataset_i.hidden = 'Y'

    for username in (exceptions or []):
        excepted_user = _get_user(username)
        dataset_i.set_owner(excepted_user.user_id,
                            read=read,
                            write=write,
                            share=share)

    DBSession.flush()
示例#14
0
def check_attr_dimension(attr_id, **kwargs):
    """
        Check that the dimension of the resource attribute data is
        consistent with the definition of the attribute: every dataset
        connected to this attribute via a resource attribute must share
        the attribute's dimension.

        Returns 'OK' on success; raises HydraError listing the offending
        dataset IDs otherwise.
    """
    attr_i = _get_attr(attr_id)

    datasets = DBSession.query(Dataset).filter(
        Dataset.dataset_id == ResourceScenario.dataset_id,
        ResourceScenario.resource_attr_id == ResourceAttr.resource_attr_id,
        ResourceAttr.attr_id == attr_id).all()

    bad_datasets = [d.dataset_id for d in datasets
                    if d.data_dimen != attr_i.attr_dimen]

    if bad_datasets:
        raise HydraError(
            "Datasets %s have a different dimension to attribute %s" %
            (bad_datasets, attr_id))

    return 'OK'
示例#15
0
def add_resource_attribute(resource_type, resource_id, attr_id, is_var,
                           **kwargs):
    """
        Attach an attribute to a resource.

        is_var indicates whether the attribute is a variable -- used in
        simulation to flag that this value is expected to be filled in
        by the simulator. Returns the new resource attribute.
    """

    attr = DBSession.query(Attr).filter(Attr.attr_id == attr_id).first()

    if attr is None:
        raise ResourceNotFoundError("Attribute with ID %s does not exist." %
                                    attr_id)

    resource_i = _get_resource(resource_type, resource_id)

    # A resource may carry each attribute at most once.
    if any(ra.attr_id == attr_id for ra in resource_i.attributes):
        raise HydraError(
            "Duplicate attribute. %s %s already has attribute %s" %
            (resource_type, resource_i.get_name(), attr.attr_name))

    new_ra = resource_i.add_attribute(attr_id, 'Y' if is_var else 'N')
    DBSession.flush()

    return new_ra
示例#16
0
def delete_dataset(dataset_id,**kwargs):
    """
        Removes a piece of data from the DB.
        CAUTION! Use with care, as this cannot be undone easily.

        Raises HydraError if the dataset does not exist or is still
        referenced by any resource scenario.
    """
    try:
        dataset_i = DBSession.query(Dataset).filter(
            Dataset.dataset_id == dataset_id).one()
    except NoResultFound:
        raise HydraError("Dataset %s does not exist."%dataset_id)

    # Refuse to delete data still in use by a scenario.
    linked_rs = DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == dataset_id).all()
    if linked_rs:
        raise HydraError("Cannot delete %s. Dataset is used by resource scenarios."%dataset_id)

    DBSession.delete(dataset_i)
    DBSession.flush()
示例#17
0
def make_root_user():
    """
        Ensure the 'root' user exists and holds the admin role, creating
        the user and the role link as needed, then commit.

        Raises HydraError when the admin role is missing from the DB.
    """
    try:
        root = DBSession.query(User).filter(User.username == 'root').one()
    except NoResultFound:
        root = User(username='******',
                    password=bcrypt.hashpw('', bcrypt.gensalt()),
                    display_name='Root User')
        DBSession.add(root)

    try:
        admin_role = DBSession.query(Role).filter(Role.role_code == 'admin').one()
    except NoResultFound:
        raise HydraError("Admin role not found.")

    # Link root to the admin role only if not already linked.
    try:
        DBSession.query(RoleUser).filter(
            RoleUser.role_id == admin_role.role_id,
            RoleUser.user_id == root.user_id).one()
    except NoResultFound:
        link = RoleUser(role_id=admin_role.role_id, user_id=root.user_id)
        root.roleusers.append(link)
        DBSession.add(link)

    DBSession.flush()
    transaction.commit()
示例#18
0
def set_network_permission(network_id, usernames, read, write, share,
                           **kwargs):
    """
        Set permissions on a network to a list of users, identifed by
        their usernames. The read flag ('Y' or 'N') sets read access, the write
        flag sets write access. If the read flag is 'N', then there is
        automatically no write access or share access.

        Raises HydraError when asked to change permissions of the
        network's creator.
    """

    user_id = kwargs.get('user_id')

    net_i = _get_network(network_id)

    #Check if the user is allowed to share this network.
    net_i.check_share_permission(user_id)

    #You cannot edit something you cannot see.
    if read == 'N':
        write = 'N'
        share = 'N'

    for username in usernames:

        user_i = _get_user(username)

        #The creator of a network must always have read and write access
        #to their project
        if net_i.created_by == user_i.user_id:
            # BUG FIX: "tis" typo in the user-facing error message.
            raise HydraError("Cannot set permissions on network %s"
                             " for user %s as this user is the creator." %
                             (network_id, username))

        net_i.set_owner(user_i.user_id, read=read, write=write, share=share)
    DBSession.flush()
示例#19
0
def update_dataset(dataset_id, name, data_type, val, units, dimension, metadata=None, **kwargs):
    """
        Update an existing dataset.

        If the dataset is attached to any locked scenario its data must
        not change, so a new dataset is created and assigned to all
        unlocked scenarios instead. Otherwise the dataset is updated in
        place; if an identical dataset the user can access already exists,
        the update is deduplicated onto that dataset.
    """

    if dataset_id is None:
        raise HydraError("Dataset must have an ID to be updated.")

    # BUG FIX: 'metadata' previously defaulted to a shared mutable dict,
    # which callees could mutate across calls.
    if metadata is None:
        metadata = {}

    user_id = kwargs.get('user_id')

    dataset = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()
    #This dataset been seen before, so it may be attached
    #to other scenarios, which may be locked. If they are locked, we must
    #not change their data, so new data must be created for the unlocked scenarios
    locked_scenarios = []
    unlocked_scenarios = []
    for dataset_rs in dataset.resourcescenarios:
        if dataset_rs.scenario.locked == 'Y':
            locked_scenarios.append(dataset_rs)
        else:
            unlocked_scenarios.append(dataset_rs)

    #Are any of these scenarios locked?
    if len(locked_scenarios) > 0:
        #If so, create a new dataset and assign to all unlocked datasets.
        dataset = add_dataset(data_type,
                                val,
                                units,
                                dimension,
                                metadata=metadata,
                                name=name,
                                user_id=kwargs['user_id'])
        for unlocked_rs in unlocked_scenarios:
            unlocked_rs.dataset = dataset

    else:

        dataset.set_val(data_type, val)

        dataset.set_metadata(metadata)

        dataset.data_type  = data_type
        dataset.data_units = units
        dataset.data_name  = name
        dataset.data_dimen = dimension
        dataset.created_by = kwargs['user_id']
        dataset.data_hash  = dataset.set_hash()

        #Is there a dataset in the DB already which is identical to the updated dataset?
        existing_dataset = DBSession.query(Dataset).filter(Dataset.data_hash==dataset.data_hash, Dataset.dataset_id != dataset.dataset_id).first()
        if existing_dataset is not None and existing_dataset.check_user(user_id):
            log.warn("An identical dataset %s has been found to dataset %s."
                     " Deleting dataset and returning dataset %s",
                     existing_dataset.dataset_id, dataset.dataset_id, existing_dataset.dataset_id)
            DBSession.delete(dataset)
            dataset = existing_dataset

    return dataset
示例#20
0
def add_resourcegroupitem(group_item, scenario_id, **kwargs):
    """
        Add an item (node, link or sub-group) to a resource group within
        the given scenario. group_item.ref_key selects the resource type
        that group_item.ref_id must point at; an unknown key or a dangling
        ref_id raises HydraError. Returns the new group item.
    """
    scenario._check_can_edit_scenario(scenario_id, kwargs['user_id'])

    # Per ref_key: model to validate against, its ID column, the error
    # message for a dangling ref, and the group-item column to populate.
    dispatch = {
        'NODE': (Node, Node.node_id,
                 "Invalid ref ID %s for a Node group item!", 'node_id'),
        'LINK': (Link, Link.link_id,
                 "Invalid ref ID %s for a Link group item!", 'link_id'),
        'GROUP': (ResourceGroup, ResourceGroup.group_id,
                  "Invalid ref ID %s for a Group group item!", 'subgroup_id'),
    }

    if group_item.ref_key not in dispatch:
        raise HydraError("Invalid ref key: %s" % (group_item.ref_key))

    model, id_col, missing_msg, target_col = dispatch[group_item.ref_key]

    #Check whether the ref_id points at an existing resource.
    try:
        DBSession.query(model).filter(id_col == group_item.ref_id).one()
    except NoResultFound:
        raise HydraError(missing_msg % (group_item.ref_id))

    group_item_i = ResourceGroupItem()
    group_item_i.scenario_id = scenario_id
    group_item_i.group_id = group_item.group_id
    group_item_i.ref_key = group_item.ref_key
    setattr(group_item_i, target_col, group_item.ref_id)

    DBSession.add(group_item_i)
    DBSession.flush()

    return group_item_i
示例#21
0
    def get_dimension(self, unit):
        """Return the name of the physical dimension the given unit
        measures.

        Raises HydraError when the (parsed) unit is not registered
        under any dimension.
        """
        base_unit, _ = self.parse_unit(unit)
        for dimension, dim_units in self.dimensions.items():
            if base_unit in dim_units:
                return dimension
        raise HydraError('Unit %s not found.' % (base_unit))
示例#22
0
def get_role(role_id,**kwargs):
    """
        Get a role by its ID.

        Raises HydraError if no role with that ID exists.
    """
    try:
        role = DBSession.query(Role).filter(Role.role_id==role_id).one()
        return role
    except NoResultFound:
        # BUG FIX: the message was previously passed logging-style (format
        # string plus separate argument); HydraError does not interpolate,
        # so the role_id never appeared in the message.
        raise HydraError("Role not found (role_id=%s)" % (role_id,))
示例#23
0
def share_project(project_id, usernames, read_only, share, **kwargs):
    """
        Share an entire project with a list of users, identifed by
        their usernames.

        The read_only flag ('Y' or 'N') must be set
        to 'Y' to allow write access or sharing.

        The share flag ('Y' or 'N') must be set to 'Y' to allow the
        project to be shared with other users; only the project's
        creator may grant this.
    """
    user_id = kwargs.get('user_id')

    proj_i = _get_project(project_id)

    #Is the sharing user allowed to share this project?
    proj_i.check_share_permission(int(user_id))

    user_id = int(user_id)

    # The sharer must be an owner of the project.
    if not any(owner.user_id == user_id for owner in proj_i.owners):
        raise HydraError("Permission Denied. Cannot share project.")

    write = 'Y'
    if read_only == 'Y':
        # No write or re-share on a read-only grant.
        write = 'N'
        share = 'N'

    if share == 'Y' and proj_i.created_by != user_id:
        raise HydraError("Cannot share the 'sharing' ability as user %s is not"
                         " the owner of project %s" % (user_id, project_id))

    for username in usernames:
        user_i = _get_user(username)

        # Grant on the project and on every network it contains.
        proj_i.set_owner(user_i.user_id, write=write, share=share)
        for net_i in proj_i.networks:
            net_i.set_owner(user_i.user_id, write=write, share=share)

    DBSession.flush()
示例#24
0
def clone_dataset(dataset_id, **kwargs):
    """
        Clone the dataset with the given ID for the calling user and
        return the new Dataset. The caller must be the dataset's creator
        or one of its owners. Returns None if dataset_id is None.
    """

    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None

    dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).options(
            joinedload_all('metadata')).first()

    if dataset is None:
        raise HydraError("Dataset %s does not exist." % (dataset_id))

    # Non-creators must appear in the dataset's owner table.
    if dataset.created_by != user_id:
        owner = DBSession.query(DatasetOwner).filter(
            DatasetOwner.dataset_id == Dataset.dataset_id,
            DatasetOwner.user_id == user_id).first()
        if owner is None:
            raise PermissionError(
                "User %s is not an owner of dataset %s and therefore cannot clone it."
                % (user_id, dataset_id))

    # Detach the row from the session so it can be re-inserted as a copy.
    DBSession.expunge(dataset)

    make_transient(dataset)

    dataset.data_name = dataset.data_name + "(Clone)"
    dataset.dataset_id = None
    dataset.cr_date = None

    #Try to avoid duplicate metadata entries if the entry has been cloned previously
    # BUG FIX: the previous 'del (m)' only unbound the loop variable and
    # never removed anything from the metadata collection.
    for m in list(dataset.metadata):
        if m.metadata_name in ("clone_of", "cloned_by"):
            dataset.metadata.remove(m)

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "clone_of"
    cloned_meta.metadata_val = str(dataset_id)
    dataset.metadata.append(cloned_meta)
    cloned_meta = Metadata()
    cloned_meta.metadata_name = "cloned_by"
    cloned_meta.metadata_val = str(user_id)
    dataset.metadata.append(cloned_meta)

    dataset.set_hash()
    DBSession.add(dataset)
    DBSession.flush()

    cloned_dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset.dataset_id).first()

    return cloned_dataset
示例#25
0
    def get_resource_attr_collection(ctx, collection_id):
        """
            Fetch a resource attribute collection by its ID.

            Raises HydraError if no collection with that ID exists.
            (The original docstring said "Delete", but this function
            only reads.)
        """
        collection_i = DBSession.query(ResourceAttrCollection).filter(
            ResourceAttrCollection.collection_id == collection_id).first()

        if collection_i is None:
            # BUG FIX: the message was previously passed logging-style;
            # HydraError does not interpolate a second positional argument.
            raise HydraError("No collection with ID %s" % collection_id)

        return HydraResourceAttrCollection(collection_i)
示例#26
0
def _delete_resourcescenario(scenario_id, resource_scenario):
    """
        Remove the ResourceScenario row linking the given resource
        attribute to the given scenario.

        Raises HydraError when no such link exists.
    """
    ra_id = resource_scenario.resource_attr_id
    try:
        rs_i = DBSession.query(ResourceScenario).filter(
            ResourceScenario.scenario_id == scenario_id,
            ResourceScenario.resource_attr_id == ra_id).one()
    except NoResultFound:
        raise HydraError("ResourceAttr %s does not exist in scenario %s." %
                         (ra_id, scenario_id))
    DBSession.delete(rs_i)
示例#27
0
def remove_image(name, **kwargs):
    """
        Delete the named image from the configured image directory.
        Returns True on success; raises HydraError if no such file exists.

        NOTE(review): 'name' is joined into the path unchecked -- confirm
        callers cannot supply path components such as '..'.
    """
    img_dir = config.get('filesys', 'img_src')
    target = os.path.join(img_dir, name)

    if not os.path.exists(target):
        raise HydraError("File with name (%s) does not exist!" % (name))

    os.remove(target)
    return True
示例#28
0
def check_port_available(domain, port):
    """
        Given a domain and port, check to see whether that combination is
        available for hydra to use.

        Raises HydraError when something is already listening there;
        otherwise logs that the port is free.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 when a connection succeeded, i.e. the
        # port is already occupied.
        result = sock.connect_ex((domain, port))
    finally:
        # BUG FIX: the socket was previously never closed, leaking a file
        # descriptor on every call.
        sock.close()
    if result == 0:
        raise HydraError("Something else is already running on port %s" % port)
    else:
        log.info("Port %s is available", port)
示例#29
0
def remove_file(resource_type, resource_id, name, **kwargs):
    """
        Delete the named file stored under the given resource type and ID
        in the configured file directory. Returns True on success; raises
        HydraError if no such file exists.

        NOTE(review): 'name' is joined into the path unchecked -- confirm
        callers cannot supply path components such as '..'.
    """
    file_dir = config.get('filesys', 'file_src')
    target = os.path.join(file_dir, resource_type, str(resource_id), name)

    if not os.path.exists(target):
        raise HydraError("File with name (%s) does not exist!" % (name))

    os.remove(target)
    return True
示例#30
0
def get_user_roles(uid,**kwargs):
    """
        Get the roles for a user.
        @param uid: the ID of the user whose roles are returned
    """
    try:
        user_roles = DBSession.query(Role).filter(Role.role_id==RoleUser.role_id,
                                                  RoleUser.user_id==uid).all()
        return user_roles
    except NoResultFound:
        # BUG FIX: the message was previously passed logging-style; HydraError
        # does not interpolate a second positional argument.
        # NOTE(review): Query.all() never raises NoResultFound, so this
        # handler looks unreachable -- confirm before removing.
        raise HydraError("Roles not found for user (user_id=%s)" % (uid,))