def delete_resourcegroupitem(item_id, **kwargs):
    """Delete a single resource group item.

    The calling user must have edit rights on the scenario that contains
    the item.
    """
    item = _get_item(item_id)
    scenario._check_can_edit_scenario(item.scenario_id, kwargs['user_id'])
    DBSession.delete(item)
    DBSession.flush()
    return 'OK'
def update_role(role, **kwargs):
    """
    Update a role's name and code, and attach the given permissions and
    users to it.

    Requires the 'edit_role' permission.
    Raises ResourceNotFoundError if the role does not exist.
    """
    check_perm(kwargs.get('user_id'), 'edit_role')
    try:
        role_i = DBSession.query(Role).filter(Role.role_id == role.id).one()
        role_i.role_name = role.name
        role_i.role_code = role.code
    except NoResultFound:
        raise ResourceNotFoundError("Role (role_id=%s) does not exist" % (role.id))

    for perm in role.permissions:
        _get_perm(perm.id)
        roleperm_i = RolePerm(role_id=role.id, perm_id=perm.id)
        DBSession.add(roleperm_i)

    for user in role.users:
        _get_user(user.id)
        # BUG FIX: this previously built RoleUser(user_id=user.id, perm_id=perm.id),
        # linking the user to the last-seen permission instead of this role, and
        # raising NameError when the role had no permissions at all.
        roleuser_i = RoleUser(user_id=user.id, role_id=role.id)
        DBSession.add(roleuser_i)

    DBSession.flush()
    return role_i
def update_resourcedata(scenario_id, resource_scenarios, **kwargs):
    """
    Update the data associated with a scenario.

    Data missing from the resource scenario will NOT be removed from the
    scenario — use remove_resourcedata for that. A resource scenario whose
    value is None is deleted; otherwise it is created or updated as needed.
    Returns the list of updated resource scenarios.
    """
    user_id = kwargs.get('user_id')
    _check_can_edit_scenario(scenario_id, kwargs['user_id'])
    scen_i = _get_scenario(scenario_id, False, False)

    updated = []
    for rs in resource_scenarios:
        if rs.value is None:
            # A null value means "remove this resource scenario".
            _delete_resourcescenario(scenario_id, rs)
        else:
            updated.append(_update_resourcescenario(
                scen_i, rs, user_id=user_id, source=kwargs.get('app_name')))

    DBSession.flush()
    return updated
def add_attribute(attr, **kwargs):
    """
    Add a generic attribute, which can then be used in creating
    a resource attribute, and put into a type.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    If an attribute with the same name and dimension already exists, it is
    returned instead of creating a duplicate.
    """
    log.debug("Adding attribute: %s", attr.name)

    if attr.dimen is None or attr.dimen.lower() == 'dimensionless':
        # BUG FIX: log message previously misspelled 'dimesionless'.
        log.info("Setting 'dimensionless' on attribute %s", attr.name)
        attr.dimen = 'dimensionless'

    try:
        attr_i = DBSession.query(Attr).filter(Attr.attr_name == attr.name,
                                              Attr.attr_dimen == attr.dimen).one()
        log.info("Attr already exists")
    except NoResultFound:
        attr_i = Attr(attr_name=attr.name, attr_dimen=attr.dimen)
        attr_i.attr_description = attr.description
        DBSession.add(attr_i)
        DBSession.flush()
        log.info("New attr added")
    return attr_i
def set_network_permission(network_id, usernames, read, write, share, **kwargs):
    """
    Set permissions on a network for a list of users, identified by their
    usernames.

    The read flag ('Y' or 'N') sets read access, the write flag sets write
    access. If the read flag is 'N', then there is automatically no write
    access or share access.

    Raises HydraError if an attempt is made to change the creator's access.
    """
    user_id = kwargs.get('user_id')
    net_i = _get_network(network_id)

    #Check if the user is allowed to share this network.
    net_i.check_share_permission(user_id)

    #You cannot edit something you cannot see.
    if read == 'N':
        write = 'N'
        share = 'N'

    for username in usernames:
        user_i = _get_user(username)

        #The creator of a network must always have read and write access
        #to their project
        if net_i.created_by == user_i.user_id:
            # BUG FIX: error message previously read "as tis user".
            raise HydraError("Cannot set permissions on network %s"
                             " for user %s as this user is the creator." %
                             (network_id, username))

        net_i.set_owner(user_i.user_id, read=read, write=write, share=share)

    DBSession.flush()
def update_rule(rule, **kwargs):
    """
    Update an existing rule. The ref_key (NETWORK / NODE / LINK / GROUP)
    cannot be changed; attempting to do so raises HydraError.
    """
    rule_i = _get_rule(rule.id)

    if rule.ref_key != rule_i.ref_key:
        raise HydraError("Cannot convert a %s rule to a %s rule. Please create a new rule instead." % (rule_i.ref_key, rule.ref_key))

    if rule.ref_key == 'NETWORK':
        rule_i.network_id = rule.ref_id
    elif rule.ref_key == 'NODE':
        rule_i.node_id = rule.ref_id
    elif rule.ref_key == 'LINK':
        rule_i.link_id = rule.ref_id
    elif rule.ref_key == 'GROUP':
        # BUG FIX: previously assigned rule.group_id, which is not the
        # reference ID carried by the incoming rule object (every other
        # branch uses rule.ref_id).
        rule_i.group_id = rule.ref_id
    else:
        # BUG FIX: the format string previously had no argument applied.
        raise HydraError("Ref Key %s not recognised." % rule.ref_key)

    rule_i.scenario_id = rule.scenario_id
    rule_i.rule_name = rule.name
    rule_i.rule_description = rule.description
    rule_i.rule_text = rule.text

    DBSession.flush()
    return rule_i
def bulk_update_resourcedata(scenario_ids, resource_scenarios, **kwargs):
    """
    Update the data associated with a list of scenarios.

    All scenarios must belong to the same network. Returns a dict mapping
    each scenario ID to its list of updated resource scenarios.
    """
    user_id = kwargs.get('user_id')

    net_ids = DBSession.query(Scenario.network_id).filter(
        Scenario.scenario_id.in_(scenario_ids)).all()
    if len(set(net_ids)) != 1:
        raise HydraError("Scenario IDS are not in the same network")

    result = {}
    for s_id in scenario_ids:
        _check_can_edit_scenario(s_id, kwargs['user_id'])
        scen_i = _get_scenario(s_id, False, False)

        result[s_id] = []
        for rs in resource_scenarios:
            if rs.value is None:
                # Null value means the resource scenario is to be removed.
                _delete_resourcescenario(s_id, rs)
            else:
                result[s_id].append(_update_resourcescenario(
                    scen_i, rs, user_id=user_id, source=kwargs.get('app_name')))

    DBSession.flush()
    return result
def add_resource_attribute(resource_type, resource_id, attr_id, is_var, **kwargs):
    """
    Add a resource attribute to a resource.

    attr_is_var indicates whether the attribute is a variable or not --
    this is used in simulation to indicate that this value is expected
    to be filled in by the simulator.
    """
    attr = DBSession.query(Attr).filter(Attr.attr_id == attr_id).first()
    if attr is None:
        raise ResourceNotFoundError("Attribute with ID %s does not exist." % attr_id)

    resource_i = _get_resource(resource_type, resource_id)

    # A resource may carry each attribute at most once.
    if any(ra.attr_id == attr_id for ra in resource_i.attributes):
        raise HydraError("Duplicate attribute. %s %s already has attribute %s"
                         % (resource_type, resource_i.get_name(), attr.attr_name))

    new_ra = resource_i.add_attribute(attr_id, 'Y' if is_var else 'N')
    DBSession.flush()
    return new_ra
def update_attribute(attr, **kwargs):
    """
    Update a generic attribute's name, dimension and description.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    After updating, the attribute's dimension is re-checked for consistency.
    """
    if attr.dimen is None or attr.dimen.lower() == 'dimensionless':
        # BUG FIX: log message previously misspelled 'dimesionless'.
        log.info("Setting 'dimensionless' on attribute %s", attr.name)
        attr.dimen = 'dimensionless'

    log.debug("Updating attribute: %s", attr.name)
    # BUG FIX: previously called _get_attr(Attr.attr_id), passing the mapped
    # class's column object rather than the incoming attribute's ID.
    attr_i = _get_attr(attr.id)
    attr_i.attr_name = attr.name
    # BUG FIX: previously read attr.dimension, but the field populated and
    # read everywhere else in this module (including above) is attr.dimen.
    attr_i.attr_dimen = attr.dimen
    attr_i.attr_description = attr.description

    #Make sure an update hasn't caused an inconsistency.
    check_attr_dimension(attr_i.attr_id)

    DBSession.flush()
    return attr_i
def delete_rule(rule_id, **kwargs):
    """Soft-delete a rule by setting its status flag to 'X'."""
    rule_to_delete = _get_rule(rule_id)
    rule_to_delete.status = 'X'
    DBSession.flush()
def assign_value(rs, data_type, val, units, name, dimension, metadata=None,
                 data_hash=None, user_id=None, source=None):
    """
    Insert or update a piece of data in a scenario.

    If the dataset is being shared by other resource scenarios, a new
    dataset is inserted. If the dataset is ONLY being used by the resource
    scenario in question, the dataset is updated to avoid unnecessary
    duplication.

    Raises PermissionError if the scenario is locked.
    """
    # BUG FIX: 'metadata={}' was a mutable default argument shared across
    # calls; use None as the sentinel and create a fresh dict per call.
    if metadata is None:
        metadata = {}

    log.debug("Assigning value %s to rs %s in scenario %s",
              name, rs.resource_attr_id, rs.scenario_id)

    if rs.scenario.locked == 'Y':
        raise PermissionError("Cannot assign value. Scenario %s is locked"
                              % (rs.scenario_id))

    #Check if this RS is the only RS in the DB connected to this dataset.
    #If no results is found, the RS isn't in the DB yet, so the condition is false.
    update_dataset = False  # Default behaviour is to create a new dataset.

    if rs.dataset is not None:
        #Has this dataset changed?
        if rs.dataset.data_hash == data_hash:
            log.debug("Dataset has not changed. Returning.")
            return

        connected_rs = DBSession.query(ResourceScenario).filter(
            ResourceScenario.dataset_id == rs.dataset.dataset_id).all()
        #If there's no RS found, then the incoming rs is new, so the dataset
        #can be altered without fear of affecting something else.
        if len(connected_rs) == 0:
            #If it's 1, the RS exists in the DB, but it's the only one using this
            #dataset or the RS isn't in the DB yet and the dataset is being used
            #by 1 other RS.
            update_dataset = True

        if len(connected_rs) == 1:
            if connected_rs[0].scenario_id == rs.scenario_id \
                    and connected_rs[0].resource_attr_id == rs.resource_attr_id:
                update_dataset = True
            else:
                update_dataset = False

    if update_dataset is True:
        log.info("Updating dataset '%s'", name)
        dataset = data.update_dataset(rs.dataset.dataset_id, name, data_type,
                                      val, units, dimension, metadata,
                                      **dict(user_id=user_id))
        rs.dataset = dataset
        rs.dataset_id = dataset.dataset_id
    else:
        log.info("Creating new dataset %s in scenario %s", name, rs.scenario_id)
        dataset = data.add_dataset(data_type, val, units, dimension,
                                   metadata=metadata, name=name,
                                   **dict(user_id=user_id))
        rs.dataset = dataset
        rs.source = source

    DBSession.flush()
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id,
                              source_scenario_id, target_scenario_id, **kwargs):
    """
    Using a resource attribute mapping, take the value from the source and
    apply it to the target. Both source and target scenarios must be
    specified (and therefore must exist).

    Returns the new/updated target resource scenario, or None if the source
    resource scenario does not exist (in which case the target is deleted).
    """
    rm = aliased(ResourceAttrMap, name='rm')
    #Check the mapping exists.
    mapping = DBSession.query(rm).filter(
        or_(
            and_(
                rm.resource_attr_id_a == source_resource_attr_id,
                rm.resource_attr_id_b == target_resource_attr_id
            ),
            and_(
                rm.resource_attr_id_a == target_resource_attr_id,
                rm.resource_attr_id_b == source_resource_attr_id
            )
        )
    ).first()

    if mapping is None:
        raise ResourceNotFoundError("Mapping between %s and %s not found" %
                                    (source_resource_attr_id,
                                     target_resource_attr_id))

    #check scenarios exist
    s1 = _get_scenario(source_scenario_id, False, False)
    s2 = _get_scenario(target_scenario_id, False, False)

    rs = aliased(ResourceScenario, name='rs')
    rs1 = DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id,
                                     rs.scenario_id == source_scenario_id).first()
    rs2 = DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id,
                                     rs.scenario_id == target_scenario_id).first()

    #3 possibilities worth considering:
    #1: Both RS exist, so update the target RS
    #2: Target RS does not exist, so create it with the dataset from RS1
    #3: Source RS does not exist, so it must be removed from the target scenario if it exists
    return_value = None  #Either return null or return a new or updated resource scenario
    if rs1 is not None:
        if rs2 is not None:
            # BUG FIX: log messages previously misspelled 'dastaset'.
            log.info("Destination Resource Scenario exists. Updating dataset ID")
            rs2.dataset_id = rs1.dataset_id
        else:
            log.info("Destination has no data, so making a new Resource Scenario")
            rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id,
                                   scenario_id=target_scenario_id,
                                   dataset_id=rs1.dataset_id)
            DBSession.add(rs2)
            DBSession.flush()
        return_value = rs2
    else:
        log.info("Source Resource Scenario does not exist. "
                 "Deleting destination Resource Scenario")
        if rs2 is not None:
            DBSession.delete(rs2)

    DBSession.flush()
    return return_value
def delete_dataset_collection(collection_id, **kwargs):
    """Delete a dataset collection; raise ResourceNotFoundError if absent."""
    collection_i = DBSession.query(DatasetCollection).filter(
        DatasetCollection.collection_id == collection_id).first()
    if collection_i is None:
        raise ResourceNotFoundError("No dataset collection found with id %s" % collection_id)
    DBSession.delete(collection_i)
    DBSession.flush()
def add_role(role, **kwargs):
    """Create a new role. Requires the 'add_role' permission."""
    check_perm(kwargs.get('user_id'), 'add_role')
    new_role = Role(role_name=role.name, role_code=role.code)
    DBSession.add(new_role)
    DBSession.flush()
    return new_role
def bulk_insert_data(data, **kwargs): datasets = _bulk_insert_data(data, user_id=kwargs.get('user_id'), source=kwargs.get('app_name')) #This line exists to make the DBSession 'dirty', #thereby telling it to flush the bulk insert. datasets[0].data_name = datasets[0].data_name DBSession.flush() return datasets
def add_perm(perm, **kwargs):
    """Create a new permission. Requires the 'add_perm' permission."""
    check_perm(kwargs.get('user_id'), 'add_perm')
    new_perm = Perm(perm_name=perm.name, perm_code=perm.code)
    DBSession.add(new_perm)
    DBSession.flush()
    return new_perm
def add_dataset_collection(collection, **kwargs):
    """Create a dataset collection containing the given dataset IDs."""
    coln = DatasetCollection(collection_name=collection.name)
    for d_id in collection.dataset_ids:
        coln.items.append(DatasetCollectionItem(dataset_id=d_id))
    DBSession.add(coln)
    DBSession.flush()
    return coln
def purge_scenario(scenario_id, **kwargs):
    """
    Delete a scenario from the database entirely (hard delete).

    The calling user must have edit rights on the scenario.
    NOTE: the original docstring wrongly said "Set the status of a
    scenario" — this function removes the row, it does not flag it.
    """
    _check_can_edit_scenario(scenario_id, kwargs['user_id'])
    scenario_i = _get_scenario(scenario_id, False, False)
    DBSession.delete(scenario_i)
    DBSession.flush()
    return 'OK'
def set_role_perm(role_id, perm_id, **kwargs):
    """Attach a permission to a role. Requires the 'edit_perm' permission."""
    check_perm(kwargs.get('user_id'), 'edit_perm')
    # Both must exist; the getters raise if not.
    _get_perm(perm_id)
    _get_role(role_id)
    link = RolePerm(role_id=role_id, perm_id=perm_id)
    DBSession.add(link)
    DBSession.flush()
    return link.role
def delete_project(project_id, **kwargs):
    """
    Delete a project from the database (hard delete).

    The calling user must have write permission on the project.
    NOTE: the original docstring claimed this sets the status to 'X';
    the code actually removes the row outright.
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'delete_project')
    project = _get_project(project_id)
    project.check_write_permission(user_id)
    DBSession.delete(project)
    DBSession.flush()
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('to_unit').

    Conversion ALWAYS creates a NEW dataset, so the function returns the
    dataset ID of the new dataset (or of an existing identical dataset if
    one with the same hash already exists).

    Raises HydraError if the dataset has no units or is a descriptor.
    """
    ds_i = DBSession.query(Dataset).filter(Dataset.dataset_id == dataset_id).one()

    dataset_type = ds_i.data_type
    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    if old_unit is None:
        raise HydraError('Dataset has no units.')

    if dataset_type == 'scalar':
        new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
    elif dataset_type == 'array':
        dim = array_dim(dsval)
        vecdata = arr_to_vector(dsval)
        newvec = hydra_units.convert(vecdata, old_unit, to_unit)
        new_val = vector_to_arr(newvec, dim)
    elif dataset_type == 'timeseries':
        new_val = []
        for ts_time, ts_val in dsval.items():
            dim = array_dim(ts_val)
            vecdata = arr_to_vector(ts_val)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            newarr = vector_to_arr(newvec, dim)
            # BUG FIX: list.append takes exactly one argument; the original
            # 'new_val.append(ts_time, newarr)' raised TypeError at runtime.
            new_val.append((ts_time, newarr))
    elif dataset_type == 'descriptor':
        raise HydraError('Cannot convert descriptor.')

    new_dataset = Dataset()
    new_dataset.data_units = to_unit
    new_dataset.set_val(dataset_type, new_val)
    new_dataset.data_dimen = ds_i.data_dimen
    new_dataset.data_name = ds_i.data_name
    new_dataset.data_type = ds_i.data_type
    new_dataset.hidden = 'N'
    new_dataset.set_metadata(ds_i.get_metadata_as_dict())
    new_dataset.set_hash()

    # Reuse an identical dataset rather than inserting a duplicate.
    existing_ds = DBSession.query(Dataset).filter(
        Dataset.data_hash == new_dataset.data_hash).first()
    if existing_ds is not None:
        DBSession.expunge_all()
        return existing_ds.dataset_id

    DBSession.add(new_dataset)
    DBSession.flush()
    return new_dataset.dataset_id
def set_scenario_status(scenario_id, status, **kwargs):
    """Set the status flag of a scenario (caller needs edit rights)."""
    _check_can_edit_scenario(scenario_id, kwargs['user_id'])
    scen = _get_scenario(scenario_id, False, False)
    scen.status = status
    DBSession.flush()
    return 'OK'
def add_resourcegroup(group, network_id, **kwargs):
    """Add a new resource group to a network and return it."""
    new_group = ResourceGroup()
    new_group.group_name = group.name
    new_group.group_description = group.description
    new_group.status = group.status
    new_group.network_id = network_id

    DBSession.add(new_group)
    DBSession.flush()
    return new_group
def unlock_scenario(scenario_id, **kwargs):
    """Unlock a scenario. Only a network owner with edit rights may do so."""
    #user_id = kwargs.get('user_id')
    #check_perm(user_id, 'edit_network')
    scenario_i = _get_scenario(scenario_id, False, False)
    owner = _check_network_owner(scenario_i.network, kwargs['user_id'])

    if owner.edit != 'Y':
        raise PermissionError('User %s cannot unlock scenario %s'
                              % (kwargs['user_id'], scenario_id))

    scenario_i.locked = 'N'
    DBSession.flush()
    return 'OK'
def set_user_role(new_user_id, role_id, **kwargs):
    """
    Assign a role to a user. Requires the 'edit_role' permission.

    Raises ResourceNotFoundError if the user or role does not exist.
    """
    check_perm(kwargs.get('user_id'), 'edit_role')
    try:
        _get_user(new_user_id)
        _get_role(role_id)
        roleuser_i = RoleUser(user_id=new_user_id, role_id=role_id)
        DBSession.add(roleuser_i)
        DBSession.flush()
    # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt. Will occur if the foreign keys do not exist.
    except Exception:
        raise ResourceNotFoundError("User or Role does not exist")

    return roleuser_i.role
def clone_dataset(dataset_id, **kwargs):
    """
    Clone a single dataset, identified by ID, and return the clone.

    The caller must be the dataset's creator or one of its owners.
    The clone gets "(Clone)" appended to its name and 'clone_of' /
    'cloned_by' metadata entries recording provenance.
    """
    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None

    dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).options(joinedload_all('metadata')).first()

    if dataset is None:
        raise HydraError("Dataset %s does not exist." % (dataset_id))

    if dataset is not None and dataset.created_by != user_id:
        # BUG FIX: the filter previously compared DatasetOwner.dataset_id to
        # the Dataset.dataset_id *column* (an implicit cross-join), so the
        # check passed if the user owned ANY dataset. Compare to the actual
        # dataset_id being cloned.
        owner = DBSession.query(DatasetOwner).filter(
            DatasetOwner.dataset_id == dataset_id,
            DatasetOwner.user_id == user_id).first()
        if owner is None:
            raise PermissionError("User %s is not an owner of dataset %s and therefore cannot clone it." % (user_id, dataset_id))

    DBSession.expunge(dataset)

    make_transient(dataset)

    dataset.data_name = dataset.data_name + "(Clone)"
    dataset.dataset_id = None
    dataset.cr_date = None

    #Try to avoid duplicate metadata entries if the entry has been cloned previously
    # BUG FIX: the original 'for m in dataset.metadata: ... del(m)' only
    # unbound the loop variable and never removed anything from the
    # collection; actually remove the stale clone markers.
    for m in list(dataset.metadata):
        if m.metadata_name in ("clone_of", "cloned_by"):
            dataset.metadata.remove(m)

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "clone_of"
    cloned_meta.metadata_val = str(dataset_id)
    dataset.metadata.append(cloned_meta)
    cloned_meta = Metadata()
    cloned_meta.metadata_name = "cloned_by"
    cloned_meta.metadata_val = str(user_id)
    dataset.metadata.append(cloned_meta)

    dataset.set_hash()
    DBSession.add(dataset)
    DBSession.flush()

    cloned_dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset.dataset_id).first()

    return cloned_dataset
def update_resourcegroup(group, **kwargs):
    """
    Update an existing resource group's name, description and status.

    NOTE: the original docstring wrongly said "Add a new group to a
    network" — this function modifies an existing group.
    """
    group_i = _get_group(group.id)
    group_i.group_name = group.name
    group_i.group_description = group.description
    group_i.status = group.status
    DBSession.flush()
    return group_i
def add_attributes(attrs, **kwargs):
    """
    Add a list of generic attributes, which can then be used in creating
    resource attributes, and put into types.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    Attributes that already exist (same name AND dimension) are returned
    as-is rather than duplicated.
    """
    # BUG FIX: log message previously read "Adding s: %s".
    log.debug("Adding attrs: %s", [attr.name for attr in attrs])

    #Check to see if any of the attributes being added are already there.
    #If they are there already, don't add a new one. If an attribute
    #with the same name is there already but with a different dimension,
    #add a new attribute.
    all_attrs = DBSession.query(Attr).all()
    attr_dict = {}
    for attr in all_attrs:
        attr_dict[(attr.attr_name, attr.attr_dimen)] = attr

    attrs_to_add = []
    existing_attrs = []
    for potential_new_attr in attrs:
        if potential_new_attr.dimen is None or \
                potential_new_attr.dimen.lower() == 'dimensionless':
            potential_new_attr.dimen = 'dimensionless'

        if attr_dict.get((potential_new_attr.name, potential_new_attr.dimen)) is None:
            attrs_to_add.append(potential_new_attr)
        else:
            existing_attrs.append(attr_dict.get((potential_new_attr.name,
                                                 potential_new_attr.dimen)))

    new_attrs = []
    for attr in attrs_to_add:
        attr_i = Attr()
        attr_i.attr_name = attr.name
        attr_i.attr_dimen = attr.dimen
        attr_i.attr_description = attr.description
        DBSession.add(attr_i)
        new_attrs.append(attr_i)

    DBSession.flush()

    new_attrs = new_attrs + existing_attrs

    return new_attrs
def remove_dataset_from_collection(dataset_id, collection_id, **kwargs):
    """
    Remove a single dataset from a dataset collection.

    Raises HydraError if the dataset is not in the collection.
    """
    _get_collection(collection_id)
    collection_item = _get_collection_item(collection_id, dataset_id)
    if collection_item is None:
        # BUG FIX: the format arguments were previously passed as extra
        # positional args to HydraError, so the message was never formatted.
        raise HydraError("Dataset %s is not in collection %s."
                         % (dataset_id, collection_id))
    DBSession.delete(collection_item)
    DBSession.flush()
    return 'OK'
def unshare_project(project_id, usernames, **kwargs):
    """
    Un-share a project with a list of users, identified by their usernames.

    The calling user must have share permission on the project.
    """
    user_id = kwargs.get('user_id')
    proj_i = _get_project(project_id)
    proj_i.check_share_permission(user_id)
    for username in usernames:
        user_i = _get_user(username)
        # BUG FIX: this previously called
        # proj_i.unset_owner(user_i.user_id, write=write, share=share),
        # but neither 'write' nor 'share' was defined in this scope, so the
        # call raised NameError at runtime. Un-setting ownership takes only
        # the user ID.
        proj_i.unset_owner(user_i.user_id)
    DBSession.flush()
def remove_items_from_attr_collection(ctx, collection_id, resource_attr_ids):
    """
    Remove the given resource attribute IDs from a resource attribute
    collection.

    Raises HydraError if the collection does not exist.
    """
    collection_i = DBSession.query(ResourceAttrCollection).filter(
        ResourceAttrCollection.collection_id == collection_id).first()
    if collection_i is None:
        # BUG FIX: collection_id was previously passed as a second positional
        # argument to HydraError, so the '%s' was never substituted.
        raise HydraError("No collection with ID %s" % collection_id)

    for item in collection_i.items:
        if item.resource_attr_id in resource_attr_ids:
            DBSession.delete(item)
    DBSession.flush()
    return 'OK'
def update_resource_attr_collection(ctx, resourceattrcollection):
    """
    Update a resource attribute collection's name and layout.

    Raises HydraError if the collection does not exist.
    (The original docstring wrongly described this as a delete.)
    """
    collection_i = DBSession.query(ResourceAttrCollection).filter(
        ResourceAttrCollection.collection_id == resourceattrcollection.id).first()
    if collection_i is None:
        # BUG FIX: the ID was previously passed as a second positional
        # argument to HydraError, so the '%s' was never substituted.
        raise HydraError("No collection with ID %s" % resourceattrcollection.id)

    collection_i.layout = resourceattrcollection.get_layout()
    collection_i.name = resourceattrcollection.name

    DBSession.flush()
    return HydraResourceAttrCollection(collection_i)
def add_dataset(data_type, val, units, dimension, metadata=None, name="",
                user_id=None, flush=False):
    """
    Data can exist without scenarios. This is the mechanism whereby
    single pieces of data can be added without doing it through a scenario.

    A typical use of this would be for setting default values on types.

    If an identical dataset (same hash) already exists and is visible to
    the user, it is reused; otherwise the hash is perturbed with a
    'created_at' metadata entry and a new row is added.
    """
    # BUG FIX: 'metadata={}' was a mutable default argument shared across
    # calls; use None as the sentinel and create a fresh dict per call.
    if metadata is None:
        metadata = {}

    d = Dataset()

    d.set_val(data_type, val)

    d.set_metadata(metadata)

    # Assign dimension if necessary
    if units is not None and dimension is None:
        dimension = hydra_units.get_unit_dimension(units)

    d.data_type = data_type
    d.data_units = units
    d.data_name = name
    d.data_dimen = dimension
    d.created_by = user_id
    d.data_hash = d.set_hash()

    try:
        existing_dataset = DBSession.query(Dataset).filter(
            Dataset.data_hash == d.data_hash).one()
        if existing_dataset.check_user(user_id):
            d = existing_dataset
        else:
            # The dataset exists but is not visible to this user: store a
            # distinct copy whose hash is perturbed by a timestamp.
            d.set_metadata({'created_at': datetime.datetime.now()})
            d.set_hash()
            DBSession.add(d)
    except NoResultFound:
        DBSession.add(d)

    if flush:
        DBSession.flush()
    return d
def add_note(note, **kwargs):
    """Create a note attached to the referenced object and return it."""
    new_note = Note()
    new_note.ref_key = note.ref_key
    new_note.set_ref(note.ref_key, note.ref_id)
    new_note.note_text = note.text
    new_note.created_by = kwargs.get('user_id')

    DBSession.add(new_note)
    DBSession.flush()
    return new_note
def add_resource_attr_collection(ctx, ra_collection):
    """Create a resource attribute collection with the given items."""
    coll = ResourceAttrCollection()
    coll.collection_name = ra_collection.name
    coll.layout = ra_collection.layout
    for ra_id in ra_collection.resource_attr_ids:
        member = ResourceAttrCollectionItem()
        member.resource_attr_id = ra_id
        coll.items.append(member)

    DBSession.add(coll)
    DBSession.flush()
    return HydraResourceAttrCollection(coll)
def add_dataset_to_collection(dataset_id, collection_id, **kwargs):
    """
    Add a single dataset to a dataset collection.

    Raises HydraError if the dataset is already in the collection.
    """
    _get_collection(collection_id)
    collection_item = _get_collection_item(collection_id, dataset_id)
    if collection_item is not None:
        # BUG FIX: the format arguments were previously passed as extra
        # positional args to HydraError, so the message was never formatted.
        raise HydraError("Dataset Collection %s already contains dataset %s"
                         % (collection_id, dataset_id))

    new_item = DatasetCollectionItem()
    new_item.dataset_id = dataset_id
    new_item.collection_id = collection_id

    DBSession.add(new_item)
    DBSession.flush()

    return 'OK'
def set_project_permission(project_id, usernames, read, write, share, **kwargs):
    """
    Set permissions on a project to a list of users, identifed by
    their usernames.

    The read flag ('Y' or 'N') sets read access, the write
    flag sets write access. If the read flag is 'N', then there is
    automatically no write access or share access.

    When both read and write are 'N' the user's ownership is removed;
    otherwise ownership is set on the project and each of its networks.
    """
    user_id = kwargs.get('user_id')

    proj_i = _get_project(project_id)

    # Is the sharing user allowed to share this project?
    proj_i.check_share_permission(user_id)

    # You cannot edit something you cannot see.
    if read == 'N':
        write = 'N'
        share = 'N'

    for username in usernames:
        user_i = _get_user(username)

        # The creator of a project must always have read and write access
        # to their project
        if proj_i.created_by == user_i.user_id:
            raise HydraError("Cannot set permissions on project %s"
                             " for user %s as this user is the creator." %
                             (project_id, username))

        if (read == 'N' and write == 'N'):
            proj_i.unset_owner(user_i.user_id)
        else:
            proj_i.set_owner(user_i.user_id, read=read, write=write, share=share)
            # NOTE(review): cascading the same permissions to the project's
            # networks only when access is being granted — confirm the
            # intended nesting against the original (pre-flattening) source.
            for net_i in proj_i.networks:
                net_i.set_owner(user_i.user_id, read=read, write=write, share=share)

    DBSession.flush()
def add_resourcegroupitem(group_item, scenario_id, **kwargs): scenario._check_can_edit_scenario(scenario_id, kwargs['user_id']) #Check whether the ref_id is correct. if group_item.ref_key == 'NODE': try: DBSession.query(Node).filter( Node.node_id == group_item.ref_id).one() except NoResultFound: raise HydraError("Invalid ref ID %s for a Node group item!" % (group_item.ref_id)) elif group_item.ref_key == 'LINK': try: DBSession.query(Link).filter( Link.link_id == group_item.ref_id).one() except NoResultFound: raise HydraError("Invalid ref ID %s for a Link group item!" % (group_item.ref_id)) elif group_item.ref_key == 'GROUP': try: DBSession.query(ResourceGroup).filter( ResourceGroup.group_id == group_item.ref_id).one() except NoResultFound: raise HydraError("Invalid ref ID %s for a Group group item!" % (group_item.ref_id)) else: raise HydraError("Invalid ref key: %s" % (group_item.ref_key)) group_item_i = ResourceGroupItem() group_item_i.scenario_id = scenario_id group_item_i.group_id = group_item.group_id group_item_i.ref_key = group_item.ref_key if group_item.ref_key == 'NODE': group_item_i.node_id = group_item.ref_id elif group_item.ref_key == 'LINK': group_item_i.link_id = group_item.ref_id elif group_item.ref_key == 'GROUP': group_item_i.subgroup_id = group_item.ref_id DBSession.add(group_item_i) DBSession.flush() return group_item_i
def add_items_to_attr_collection(ctx, collection_id, resource_attr_ids):
    """
    Add new items to a resource attribute collection.

    Raises HydraError if the collection does not exist.
    """
    collection_i = DBSession.query(ResourceAttrCollection).filter(
        ResourceAttrCollection.collection_id == collection_id).first()
    if collection_i is None:
        # BUG FIX: collection_id was previously passed as a second positional
        # argument to HydraError, so the '%s' was never substituted.
        raise HydraError("No collection with ID %s" % collection_id)

    for ra_id in resource_attr_ids:
        item_i = ResourceAttrCollectionItem()
        item_i.resource_attr_id = ra_id
        collection_i.items.append(item_i)

    DBSession.add(collection_i)
    DBSession.flush()
    return HydraResourceAttrCollection(collection_i)
def update_note(note, **kwargs):
    """Update a note's reference and text. The ref_key cannot be changed."""
    existing = _get_note(note.id)

    if note.ref_key != existing.ref_key:
        raise HydraError(
            "Cannot convert a %s note to a %s note. Please create a new note instead." %
            (existing.ref_key, note.ref_key))

    existing.set_ref(note.ref_key, note.ref_id)
    existing.note_text = note.text
    DBSession.flush()

    return existing
def share_project(project_id, usernames, read_only, share, **kwargs):
    """
    Share an entire project with a list of users, identifed by their usernames.

    The read_only flag ('Y' or 'N') must be set to 'Y' to allow write access
    or sharing.

    The share flag ('Y' or 'N') must be set to 'Y' to allow the project to be
    shared with other users.
    """
    user_id = kwargs.get('user_id')

    proj_i = _get_project(project_id)

    #Is the sharing user allowed to share this project?
    proj_i.check_share_permission(int(user_id))

    user_id = int(user_id)

    # The sharing user must themselves be an owner of the project.
    for owner in proj_i.owners:
        if user_id == owner.user_id:
            break
    else:
        # for/else: no owner matched.
        raise HydraError("Permission Denied. Cannot share project.")

    if read_only == 'Y':
        write = 'N'
        share = 'N'
    else:
        write = 'Y'

    # Only the creator may delegate the ability to re-share.
    if proj_i.created_by != user_id and share == 'Y':
        raise HydraError("Cannot share the 'sharing' ability as user %s is not"
                         " the owner of project %s" %
                         (user_id, project_id))

    for username in usernames:
        user_i = _get_user(username)
        proj_i.set_owner(user_i.user_id, write=write, share=share)

        # Sharing a project implies sharing each network within it.
        for net_i in proj_i.networks:
            net_i.set_owner(user_i.user_id, write=write, share=share)
    DBSession.flush()
def unhide_dataset(dataset_id, **kwargs):
    """
    Un-hide a dataset so it is visible to users other than its owner.

    Only the dataset's creator may unhide it.
    NOTE: the original docstring described the *hide* operation (and its
    'exceptions' parameter, which this function does not take); this
    function clears the hidden flag.
    """
    user_id = kwargs.get('user_id')
    dataset_i = _get_dataset(dataset_id)
    #check that I can unhide the dataset
    if dataset_i.created_by != int(user_id):
        raise HydraError('Permission denied. '
                         'User %s is not the owner of dataset %s' %
                         (user_id, dataset_i.data_name))
    dataset_i.hidden = 'N'

    DBSession.flush()
def delete_dataset(dataset_id, **kwargs):
    """
    Remove a piece of data from the DB.
    CAUTION! Use with care, as this cannot be undone easily.

    Refuses to delete a dataset still referenced by any resource scenario.
    """
    ds = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).first()
    if ds is None:
        raise HydraError("Dataset %s does not exist." % dataset_id)

    linked_rs = DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == dataset_id).all()
    if linked_rs:
        raise HydraError(
            "Cannot delete %s. Dataset is used by resource scenarios." % dataset_id)

    DBSession.delete(ds)
    DBSession.flush()
def add_resourcegroupitems(scenario_id, items, scenario=None, **kwargs):
    """Add a list of items to a group within a scenario and return them."""
    user_id = int(kwargs.get('user_id'))

    if scenario is None:
        scenario = _get_scenario(scenario_id, include_data=False,
                                 include_items=False)

    _check_network_ownership(scenario.network_id, user_id)

    created = [_add_resourcegroupitem(gi, scenario.scenario_id) for gi in items]

    DBSession.flush()
    return created
def delete_mappings_in_network(network_id, network_2_id=None, **kwargs):
    """
    Delete all the resource attribute mappings in a network. If a second
    network is specified, only delete the mappings between the two networks.
    """
    qry = DBSession.query(ResourceAttrMap).filter(
        or_(ResourceAttrMap.network_a_id == network_id,
            ResourceAttrMap.network_b_id == network_id))

    if network_2_id is not None:
        # Restrict to mappings touching BOTH networks.
        qry = qry.filter(or_(ResourceAttrMap.network_a_id == network_2_id,
                             ResourceAttrMap.network_b_id == network_2_id))

    for mapping in qry.all():
        DBSession.delete(mapping)

    DBSession.flush()
    return 'OK'
def update_project(project, **kwargs):
    """
    Update a project's name, description, attributes and attribute data.

    Returns the updated project. The caller must have write permission.
    """
    user_id = kwargs.get('user_id')
    #check_perm(user_id, 'update_project')
    project_i = _get_project(project.id)

    project_i.check_write_permission(user_id)

    project_i.project_name = project.name
    project_i.project_description = project.description

    attr_map = add_attributes(project_i, project.attributes)
    project_data = _add_project_attribute_data(project_i, attr_map,
                                               project.attribute_data)
    project_i.attribute_data = project_data

    DBSession.flush()
    return project_i
def add_user(user, **kwargs):
    """Create a new user; raise HydraError if the username is taken."""
    #check_perm(kwargs.get('user_id'), 'add_user')

    #If the user is already there, cannot add another with
    #the same username.
    if _get_user_id(user.username) is not None:
        raise HydraError("User %s already exists!" % user.username)

    new_user = User()
    new_user.username = user.username
    new_user.display_name = user.display_name
    new_user.password = bcrypt.hashpw(user.password.encode('utf-8'),
                                      bcrypt.gensalt())

    DBSession.add(new_user)
    DBSession.flush()

    return new_user
def add_resource_attrs_from_type(type_id, resource_type, resource_id, **kwargs):
    """
    Add to a resource every attribute defined by a template type that the
    resource does not already have. Returns the newly created resource
    attributes.
    """
    type_i = _get_templatetype(type_id)
    resource_i = _get_resource(resource_type, resource_id)

    existing_attr_ids = {ra.attr_id for ra in resource_i.attributes}

    new_resource_attrs = [resource_i.add_attribute(ta.attr_id)
                          for ta in type_i.typeattrs
                          if ta.attr_id not in existing_attr_ids]

    DBSession.flush()
    return new_resource_attrs
def delete_attribute_mapping(resource_attr_a, resource_attr_b, **kwargs):
    """
    Remove the mapping declaring one resource attribute from one network
    to be the same as that from another network. A no-op if no such
    mapping exists.
    """
    user_id = kwargs.get('user_id')
    rm = aliased(ResourceAttrMap, name='rm')

    log.info("Trying to delete attribute map. %s -> %s",
             resource_attr_a, resource_attr_b)
    mapping = DBSession.query(rm).filter(
        rm.resource_attr_id_a == resource_attr_a,
        rm.resource_attr_id_b == resource_attr_b).first()

    if mapping is not None:
        log.info("Deleting attribute map. %s -> %s",
                 resource_attr_a, resource_attr_b)
        DBSession.delete(mapping)
        DBSession.flush()

    return 'OK'
def copy_data_from_scenario(resource_attrs, source_scenario_id,
                            target_scenario_id, **kwargs):
    """
    For a given list of resource attribute IDS copy the dataset_ids from the
    resource scenarios in the source scenario to those in the 'target' scenario.
    """

    #Get all the resource scenarios we wish to update
    target_resourcescenarios = DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id == target_scenario_id,
        ResourceScenario.resource_attr_id.in_(resource_attrs)).all()

    #Index the target resource scenarios by resource attribute ID.
    target_by_ra_id = {t.resource_attr_id: t for t in target_resourcescenarios}

    #get all the resource scenarios we are using to get our datsets source.
    source_resourcescenarios = DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id == source_scenario_id,
        ResourceScenario.resource_attr_id.in_(resource_attrs)).all()

    #If there is an RS in scenario 'source' but not in 'target', then create
    #a new one in 'target'
    for source_rs in source_resourcescenarios:
        matching_target = target_by_ra_id.get(source_rs.resource_attr_id)
        if matching_target is None:
            new_target = ResourceScenario()
            new_target.scenario_id = target_scenario_id
            new_target.dataset_id = source_rs.dataset_id
            new_target.resource_attr_id = source_rs.resource_attr_id
            DBSession.add(new_target)
        else:
            matching_target.dataset_id = source_rs.dataset_id

    DBSession.flush()

    #Note: the returned list is the pre-existing target RSs only; newly
    #created ones are not appended (matches the original behaviour).
    return target_resourcescenarios
def share_network(network_id, usernames, read_only, share, **kwargs):
    """
    Share a network with a list of users, identified by their usernames.

    The read_only flag ('Y' or 'N') must be set to 'Y' to allow write access
    or sharing.

    The share flat ('Y' or 'N') must be set to 'Y' to allow the project to be
    shared with other users
    """
    user_id = kwargs.get('user_id')

    network_i = _get_network(network_id)
    network_i.check_share_permission(user_id)

    #A read-only share grants neither write nor re-share rights.
    if read_only == 'Y':
        write = 'N'
        share = 'N'
    else:
        write = 'Y'

    #Only the network's creator may hand out the ability to re-share it.
    if share == 'Y' and network_i.created_by != int(user_id):
        raise HydraError("Cannot share the 'sharing' ability as user %s is not"
                         " the owner of network %s" % (user_id, network_id))

    for username in usernames:
        user_i = _get_user(username)

        #Set the owner ship on the network itself
        network_i.set_owner(user_i.user_id, write=write, share=share)

        project_owner_ids = [o.user_id for o in network_i.project.owners]
        if user_i.user_id not in project_owner_ids:
            #Give the user read access to the containing project
            network_i.project.set_owner(user_i.user_id, write='N', share='N')

    DBSession.flush()
def add_project(project, **kwargs):
    """
    Add a new project

    returns a project complexmodel
    """
    user_id = kwargs.get('user_id')

    #check_perm(user_id, 'add_project')
    project_i = Project()

    project_i.project_name = project.name
    project_i.project_description = project.description
    project_i.created_by = user_id

    #Attach any attributes and their data supplied on the complexmodel.
    attribute_map = add_attributes(project_i, project.attributes)
    project_i.attribute_data = _add_project_attribute_data(
        project_i, attribute_map, project.attribute_data)

    #The creating user becomes the project's owner.
    project_i.set_owner(user_id)

    DBSession.add(project_i)
    DBSession.flush()

    return project_i
def add_rule(scenario_id, rule, **kwargs):
    """
    Add a rule to the given scenario.

    Args:
        scenario_id: ID of the scenario the rule belongs to.
        rule: complexmodel with ref_key ('NETWORK', 'NODE', 'LINK' or
              'GROUP'), ref_id, name, description and text.

    Returns the newly created Rule ORM object.

    Raises:
        HydraError: if rule.ref_key is not one of the recognised keys.
    """
    rule_i = Rule()
    rule_i.ref_key = rule.ref_key
    if rule.ref_key == 'NETWORK':
        rule_i.network_id = rule.ref_id
    elif rule.ref_key == 'NODE':
        rule_i.node_id = rule.ref_id
    elif rule.ref_key == 'LINK':
        rule_i.link_id = rule.ref_id
    elif rule.ref_key == 'GROUP':
        # NOTE(review): every other branch reads rule.ref_id, but this one
        # reads rule.group_id — confirm that is intended and not a typo.
        rule_i.group_id = rule.group_id
    else:
        # Bug fix: the original message contained a '%s' placeholder but
        # never supplied the offending key.
        raise HydraError("Ref Key %s not recognised." % rule.ref_key)

    rule_i.scenario_id = scenario_id
    rule_i.rule_name = rule.name
    rule_i.rule_description = rule.description
    rule_i.rule_text = rule.text

    DBSession.add(rule_i)
    DBSession.flush()

    return rule_i
def update_resourcedata(scenario_id, resource_scenarios, **kwargs):
    """
    Update the data associated with a scenario.
    Data missing from the resource scenario will not be removed
    from the scenario. Use the remove_resourcedata for this task.

    If the resource scenario does not exist, it will be created.
    If the value of the resource scenario is specified as being None, the
    resource scenario will be deleted.
    If the value of the resource scenario does not exist, it will be created.
    If the both the resource scenario and value already exist, the resource
    scenario will be updated with the ID of the dataset.

    If the dataset being set is being changed, already exists, and is only
    used by a single resource scenario, then the dataset itself is updated,
    rather than a new one being created.
    """
    user_id = kwargs.get('user_id')

    _check_can_edit_scenario(scenario_id, kwargs['user_id'])

    scenario_i = _get_scenario(scenario_id, False, False)

    updated = []
    for resource_scenario in resource_scenarios:
        if resource_scenario.value is None:
            #A null value is the signal to remove the RS from the scenario.
            _delete_resourcescenario(scenario_id, resource_scenario)
        else:
            updated.append(_update_resourcescenario(
                scenario_i, resource_scenario,
                user_id=user_id, source=kwargs.get('app_name')))

    DBSession.flush()

    return updated
def assign_value(rs, data_type, val, units, name, dimension, metadata=None,
                 data_hash=None, user_id=None, source=None):
    """
    Insert or update a piece of data in a scenario.

    If the dataset is being shared by other resource scenarios, a new dataset
    is inserted. If the dataset is ONLY being used by the resource scenario in
    question, the dataset is updated to avoid unnecessary duplication.

    Raises:
        PermissionError: if the scenario owning `rs` is locked.
    """
    # Bug fix: the original used a mutable default argument (metadata={}),
    # which is shared across calls. Use a None sentinel instead; passing {}
    # explicitly still behaves identically, so callers are unaffected.
    if metadata is None:
        metadata = {}

    log.debug("Assigning value %s to rs %s in scenario %s",
              name, rs.resource_attr_id, rs.scenario_id)

    if rs.scenario.locked == 'Y':
        raise PermissionError("Cannot assign value. Scenario %s is locked"
                              % (rs.scenario_id))

    #Check if this RS is the only RS in the DB connected to this dataset.
    #If no results is found, the RS isn't in the DB yet, so the condition is false.

    update_dataset = False  # Default behaviour is to create a new dataset.

    if rs.dataset is not None:

        #Has this dataset changed?
        if rs.dataset.data_hash == data_hash:
            log.debug("Dataset has not changed. Returning.")
            return

        connected_rs = DBSession.query(ResourceScenario).filter(
            ResourceScenario.dataset_id == rs.dataset.dataset_id).all()
        #If there's no RS found, then the incoming rs is new, so the dataset
        #can be altered without fear of affecting something else.
        if len(connected_rs) == 0:
            #If it's 1, the RS exists in the DB, but it's the only one using
            #this dataset or the RS isn't in the DB yet and the datset is
            #being used by 1 other RS.
            update_dataset = True

        if len(connected_rs) == 1:
            if connected_rs[0].scenario_id == rs.scenario_id \
                    and connected_rs[0].resource_attr_id == rs.resource_attr_id:
                update_dataset = True
            else:
                update_dataset = False

    if update_dataset is True:
        log.info("Updating dataset '%s'", name)
        dataset = data.update_dataset(rs.dataset.dataset_id, name, data_type,
                                      val, units, dimension, metadata,
                                      **dict(user_id=user_id))
        rs.dataset = dataset
        rs.dataset_id = dataset.dataset_id
    else:
        log.info("Creating new dataset %s in scenario %s", name, rs.scenario_id)
        dataset = data.add_dataset(data_type, val, units, dimension,
                                   metadata=metadata, name=name,
                                   **dict(user_id=user_id))
        rs.dataset = dataset

    rs.source = source

    DBSession.flush()
def clone_scenario(scenario_id, **kwargs):
    """
    Make a copy of a scenario, including its resource scenarios and
    resource group items, naming it "<name> (clone)" (with a counter
    appended if clones already exist in the network).
    """
    scenario_i = _get_scenario(scenario_id)
    log.info("cloning scenario %s", scenario_i.scenario_name)

    cloned_name = "%s (clone)" % (scenario_i.scenario_name)

    #Count how many scenarios in this network already look like clones, so
    #each new clone gets a distinct name.
    network_scenarios = DBSession.query(Scenario).filter(
        Scenario.network_id == scenario_i.network_id).all()
    num_cloned_scenarios = sum(
        1 for s in network_scenarios if s.scenario_name.find('clone') >= 0)

    if num_cloned_scenarios > 0:
        cloned_name = cloned_name + " %s" % (num_cloned_scenarios)

    log.info("Cloned scenario name is %s", cloned_name)

    cloned_scen = Scenario()
    cloned_scen.network_id = scenario_i.network_id
    cloned_scen.scenario_name = cloned_name
    cloned_scen.scenario_description = scenario_i.scenario_description
    cloned_scen.created_by = kwargs['user_id']

    cloned_scen.start_time = scenario_i.start_time
    cloned_scen.end_time = scenario_i.end_time
    cloned_scen.time_step = scenario_i.time_step

    log.info("New scenario created")

    app_name = kwargs.get('app_name')
    for rs in scenario_i.resourcescenarios:
        new_rs = ResourceScenario()
        new_rs.resource_attr_id = rs.resource_attr_id
        new_rs.dataset_id = rs.dataset_id
        #Record which application made the copy; fall back to the original
        #RS's source when no app name was given.
        new_rs.source = rs.source if app_name is None else app_name
        cloned_scen.resourcescenarios.append(new_rs)

    log.info("ResourceScenarios cloned")

    for item in scenario_i.resourcegroupitems:
        new_item = ResourceGroupItem()
        new_item.ref_key = item.ref_key
        new_item.link_id = item.link_id
        new_item.node_id = item.node_id
        new_item.subgroup_id = item.subgroup_id
        new_item.group_id = item.group_id
        cloned_scen.resourcegroupitems.append(new_item)

    log.info("Resource group items cloned.")

    DBSession.add(cloned_scen)
    DBSession.flush()

    log.info("Cloning finished.")

    return cloned_scen
def add_scenario(network_id, scenario, **kwargs):
    """
    Add a scenario to a specified network.
    """
    user_id = int(kwargs.get('user_id'))
    log.info("Adding scenarios to network")

    _check_network_ownership(network_id, user_id)

    #Scenario names must be unique within a network.
    existing_scen = DBSession.query(Scenario).filter(
        Scenario.scenario_name == scenario.name,
        Scenario.network_id == network_id).first()
    if existing_scen is not None:
        raise HydraError("Scenario with name %s already exists in network %s"
                         % (scenario.name, network_id))

    scen = Scenario()
    scen.scenario_name = scenario.name
    scen.scenario_description = scenario.description
    scen.layout = scenario.get_layout()
    scen.network_id = network_id
    scen.created_by = user_id

    #Times are stored as stringified ordinals; an unset time stays None.
    if scenario.start_time:
        scen.start_time = str(timestamp_to_ordinal(scenario.start_time))
    else:
        scen.start_time = None
    if scenario.end_time:
        scen.end_time = str(timestamp_to_ordinal(scenario.end_time))
    else:
        scen.end_time = None
    scen.time_step = scenario.time_step

    #Just in case someone puts in a negative ID for the scenario.
    if scenario.id < 0:
        scenario.id = None

    if scenario.resourcescenarios is not None:
        #extract the data from each resourcescenario so it can all be
        #inserted in one go, rather than one at a time
        all_data = [r.value for r in scenario.resourcescenarios]
        datasets = data._bulk_insert_data(all_data, user_id=user_id)

        #record all the resource attribute ids
        resource_attr_ids = [r.resource_attr_id
                             for r in scenario.resourcescenarios]

        #get all the resource scenarios into a list and bulk insert them
        for i, ra_id in enumerate(resource_attr_ids):
            dataset = datasets[i]
            rs_i = ResourceScenario()
            rs_i.resource_attr_id = ra_id
            rs_i.dataset_id = dataset.dataset_id
            rs_i.scenario_id = scen.scenario_id
            rs_i.dataset = dataset
            scen.resourcescenarios.append(rs_i)

    if scenario.resourcegroupitems is not None:
        #Again doing bulk insert.
        for group_item in scenario.resourcegroupitems:
            group_item_i = ResourceGroupItem()
            group_item_i.scenario_id = scen.scenario_id
            group_item_i.group_id = group_item.group_id
            group_item_i.ref_key = group_item.ref_key
            if group_item.ref_key == 'NODE':
                group_item_i.node_id = group_item.ref_id
            elif group_item.ref_key == 'LINK':
                group_item_i.link_id = group_item.ref_id
            elif group_item.ref_key == 'GROUP':
                group_item_i.subgroup_id = group_item.ref_id
            scen.resourcegroupitems.append(group_item_i)

    DBSession.add(scen)
    DBSession.flush()

    return scen
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id,
                              source_scenario_id, target_scenario_id, **kwargs):
    """
    Using a resource attribute mapping, take the value from the source and
    apply it to the target. Both source and target scenarios must be
    specified (and therefor must exist).

    Returns the updated or newly created target ResourceScenario, or None if
    the source has no resource scenario (in which case any existing target
    resource scenario is deleted).

    Raises:
        ResourceNotFoundError: if no mapping exists between the two
            resource attributes (in either direction).
    """
    #Fixed: log message and comment both misspelled "dataset" as "dastaset".
    rm = aliased(ResourceAttrMap, name='rm')

    #Check the mapping exists (the mapping may be stored in either direction).
    mapping = DBSession.query(rm).filter(
        or_(
            and_(rm.resource_attr_id_a == source_resource_attr_id,
                 rm.resource_attr_id_b == target_resource_attr_id),
            and_(rm.resource_attr_id_a == target_resource_attr_id,
                 rm.resource_attr_id_b == source_resource_attr_id))).first()

    if mapping is None:
        raise ResourceNotFoundError(
            "Mapping between %s and %s not found"
            % (source_resource_attr_id, target_resource_attr_id))

    #check scenarios exist (these raise if either scenario is missing;
    #the return values are not otherwise needed)
    _get_scenario(source_scenario_id, False, False)
    _get_scenario(target_scenario_id, False, False)

    rs = aliased(ResourceScenario, name='rs')
    rs1 = DBSession.query(rs).filter(
        rs.resource_attr_id == source_resource_attr_id,
        rs.scenario_id == source_scenario_id).first()
    rs2 = DBSession.query(rs).filter(
        rs.resource_attr_id == target_resource_attr_id,
        rs.scenario_id == target_scenario_id).first()

    #3 possibilities worth considering:
    #1: Both RS exist, so update the target RS
    #2: Target RS does not exist, so create it with the dataset from RS1
    #3: Source RS does not exist, so it must be removed from the target
    #   scenario if it exists

    return_value = None  #Either return null or return a new or updated resource scenario

    if rs1 is not None:
        if rs2 is not None:
            log.info(
                "Destination Resource Scenario exists. Updating dataset ID")
            rs2.dataset_id = rs1.dataset_id
        else:
            log.info(
                "Destination has no data, so making a new Resource Scenario")
            rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id,
                                   scenario_id=target_scenario_id,
                                   dataset_id=rs1.dataset_id)
            DBSession.add(rs2)
        DBSession.flush()
        return_value = rs2
    else:
        log.info(
            "Source Resource Scenario does not exist. "
            "Deleting destination Resource Scenario")
        if rs2 is not None:
            DBSession.delete(rs2)

    DBSession.flush()
    return return_value
def purge_rule(rule_id, **kwargs):
    """
    Permanently remove the rule with the given ID from the database.
    """
    DBSession.delete(_get_rule(rule_id))
    DBSession.flush()
def create_default_users_and_perms():
    """
    Seed the database with the default permissions, roles and
    role-permission assignments. A no-op when permissions already exist.
    """
    #If any permission rows exist, assume the DB is already seeded.
    if len(DBSession.query(Perm).all()) > 0:
        return

    default_perms = (("add_user", "Add User"),
                     ("edit_user", "Edit User"),
                     ("add_role", "Add Role"),
                     ("edit_role", "Edit Role"),
                     ("add_perm", "Add Permission"),
                     ("edit_perm", "Edit Permission"),
                     ("add_network", "Add network"),
                     ("edit_network", "Edit network"),
                     ("delete_network", "Delete network"),
                     ("share_network", "Share network"),
                     ("edit_topology", "Edit network topology"),
                     ("add_project", "Add Project"),
                     ("edit_project", "Edit Project"),
                     ("delete_project", "Delete Project"),
                     ("share_project", "Share Project"),
                     ("edit_data", "Edit network data"),
                     ("view_data", "View network data"),
                     ("add_template", "Add Template"),
                     ("edit_template", "Edit Template"))

    default_roles = (("admin", "Administrator"),
                     ("dev", "Developer"),
                     ("modeller", "Modeller / Analyst"),
                     ("manager", "Manager"),
                     ("grad", "Graduate"),
                     ("developer", "Developer"),
                     ("decision", "Decision Maker"))

    roleperms = (('admin', "add_user"),
                 ('admin', "edit_user"),
                 ('admin', "add_role"),
                 ('admin', "edit_role"),
                 ('admin', "add_perm"),
                 ('admin', "edit_perm"),
                 ('admin', "add_network"),
                 ('admin', "edit_network"),
                 ('admin', "delete_network"),
                 ('admin', "share_network"),
                 ('admin', "add_project"),
                 ('admin', "edit_project"),
                 ('admin', "delete_project"),
                 ('admin', "share_project"),
                 ('admin', "edit_topology"),
                 ('admin', "edit_data"),
                 ('admin', "view_data"),
                 ('admin', "add_template"),
                 ('admin', "edit_template"),
                 ("developer", "add_network"),
                 ("developer", "edit_network"),
                 ("developer", "delete_network"),
                 ("developer", "share_network"),
                 ("developer", "add_project"),
                 ("developer", "edit_project"),
                 ("developer", "delete_project"),
                 ("developer", "share_project"),
                 ("developer", "edit_topology"),
                 ("developer", "edit_data"),
                 ("developer", "view_data"),
                 ("developer", "add_template"),
                 ("developer", "edit_template"),
                 ("modeller", "add_network"),
                 ("modeller", "edit_network"),
                 ("modeller", "delete_network"),
                 ("modeller", "share_network"),
                 ("modeller", "edit_topology"),
                 ("modeller", "add_project"),
                 ("modeller", "edit_project"),
                 ("modeller", "delete_project"),
                 ("modeller", "share_project"),
                 ("modeller", "edit_data"),
                 ("modeller", "view_data"),
                 ("manager", "edit_data"),
                 ("manager", "view_data"))

    #Create the permission rows, keyed by code for the roleperm pass below.
    perm_dict = {}
    for perm_code, perm_name in default_perms:
        perm_dict[perm_code] = Perm(perm_code=perm_code, perm_name=perm_name)
        DBSession.add(perm_dict[perm_code])

    #Create the role rows, keyed by code.
    role_dict = {}
    for role_code, role_name in default_roles:
        role_dict[role_code] = Role(role_code=role_code, role_name=role_name)
        DBSession.add(role_dict[role_code])

    #Link each role to its permissions.
    for role_code, perm_code in roleperms:
        roleperm_i = RolePerm()
        roleperm_i.role = role_dict[role_code]
        roleperm_i.perm = perm_dict[perm_code]
        DBSession.add(roleperm_i)

    DBSession.flush()