def _add_resourcegroupitem(group_item, scenario_id):
    """
        Add a single resource group item (no DB flush, as it's an internal function)
    """
    if group_item.id and group_item.id > 0:
        try:
            group_item_i = DBSession.query(ResourceGroupItem).filter(
                ResourceGroupItem.item_id == group_item.id).one()
        except NoResultFound:
            raise ResourceNotFoundError("ResourceGroupItem %s not found" % (group_item.id))
    else:
        group_item_i = ResourceGroupItem()
        group_item_i.group_id = group_item.group_id
        if scenario_id is not None:
            group_item_i.scenario_id = scenario_id

    ref_key = group_item.ref_key
    group_item_i.ref_key = ref_key
    if ref_key == 'NODE':
        group_item_i.node_id = group_item.ref_id
    elif ref_key == 'LINK':
        group_item_i.link_id = group_item.ref_id
    elif ref_key == 'GROUP':
        group_item_i.subgroup_id = group_item.ref_id

    DBSession.add(group_item_i)
    return group_item_i
def add_project(project, **kwargs):
    """
        Add a new project.
        Returns a project complexmodel.
    """
    user_id = kwargs.get('user_id')

    #check_perm(user_id, 'add_project')
    proj_i = Project()
    proj_i.project_name = project.name
    proj_i.project_description = project.description
    proj_i.created_by = user_id

    if project.layout is not None:
        proj_i.layout = project.get_layout()

    attr_map = add_attributes(proj_i, project.attributes)
    proj_data = _add_project_attribute_data(proj_i, attr_map, project.attribute_data)
    proj_i.attribute_data = proj_data

    proj_i.set_owner(user_id)

    DBSession.add(proj_i)
    DBSession.flush()

    return proj_i
def add_attribute(attr, **kwargs):
    """
    Add a generic attribute, which can then be used in creating
    a resource attribute, and put into a type.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    """
    log.debug("Adding attribute: %s", attr.name)

    if attr.dimen is None or attr.dimen.lower() == 'dimensionless':
        log.info("Setting 'dimensionless' on attribute %s", attr.name)
        attr.dimen = 'dimensionless'

    try:
        attr_i = DBSession.query(Attr).filter(Attr.attr_name == attr.name,
                                              Attr.attr_dimen == attr.dimen).one()
        log.info("Attr already exists")
    except NoResultFound:
        attr_i = Attr(attr_name=attr.name, attr_dimen=attr.dimen)
        attr_i.attr_description = attr.description
        DBSession.add(attr_i)
        DBSession.flush()
        log.info("New attr added")

    return attr_i
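# Hedged usage sketch, not part of the original source. It assumes a live
# DBSession, and types.SimpleNamespace stands in for the complexmodel input
# (an object with .name, .dimen and .description); all values are hypothetical.
def _example_add_attribute():
    from types import SimpleNamespace

    attr = SimpleNamespace(name="Test Attr",
                           dimen="Volume",
                           description="An illustrative attribute")
    first = add_attribute(attr, user_id=1)
    second = add_attribute(attr, user_id=1)

    # add_attribute is idempotent on (name, dimension): the second call
    # finds the existing row rather than inserting a duplicate.
    assert first.attr_id == second.attr_id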
def update_role(role, **kwargs):
    """
        Update the role.
        Used to add permissions and users to a role.
    """
    check_perm(kwargs.get('user_id'), 'edit_role')
    try:
        role_i = DBSession.query(Role).filter(Role.role_id == role.id).one()
        role_i.role_name = role.name
        role_i.role_code = role.code
    except NoResultFound:
        raise ResourceNotFoundError("Role (role_id=%s) does not exist" % (role.id))

    for perm in role.permissions:
        _get_perm(perm.id)
        roleperm_i = RolePerm(role_id=role.id,
                              perm_id=perm.id)
        DBSession.add(roleperm_i)

    for user in role.users:
        _get_user(user.id)
        # A RoleUser links a user to this role.
        roleuser_i = RoleUser(user_id=user.id,
                              role_id=role.id)
        DBSession.add(roleuser_i)

    DBSession.flush()
    return role_i
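# Hedged usage sketch, not part of the original source. The role, permission
# and user IDs are hypothetical, and SimpleNamespace stands in for the role
# complexmodel; the caller must hold the 'edit_role' permission.
def _example_update_role():
    from types import SimpleNamespace

    role = SimpleNamespace(id=3,
                           name="Modeller",
                           code="modeller",
                           permissions=[SimpleNamespace(id=7)],
                           users=[SimpleNamespace(id=12)])
    # Renames role 3 if needed, then attaches permission 7 and user 12 to it.
    role_i = update_role(role, user_id=1)
    log.info("Role %s updated", role_i.role_code)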
def make_root_user():
    """Create the 'root' user (with an empty password) and give it the admin role, if not already present."""
    try:
        user = DBSession.query(User).filter(User.username == 'root').one()
    except NoResultFound:
        user = User(username='root',
                    password=bcrypt.hashpw(''.encode('utf-8'), bcrypt.gensalt()),
                    display_name='Root User')
        DBSession.add(user)

    try:
        role = DBSession.query(Role).filter(Role.role_code == 'admin').one()
    except NoResultFound:
        raise HydraError("Admin role not found.")

    try:
        userrole = DBSession.query(RoleUser).filter(RoleUser.role_id == role.role_id,
                                                    RoleUser.user_id == user.user_id).one()
    except NoResultFound:
        userrole = RoleUser(role_id=role.role_id, user_id=user.user_id)
        user.roleusers.append(userrole)
        DBSession.add(userrole)

    DBSession.flush()
    transaction.commit()
def update_value_from_mapping(source_resource_attr_id,
                              target_resource_attr_id,
                              source_scenario_id,
                              target_scenario_id,
                              **kwargs):
    """
        Using a resource attribute mapping, take the value from the source and
        apply it to the target. Both source and target scenarios must be
        specified (and therefore must exist).
    """
    rm = aliased(ResourceAttrMap, name='rm')
    #Check the mapping exists.
    mapping = DBSession.query(rm).filter(
        or_(
            and_(rm.resource_attr_id_a == source_resource_attr_id,
                 rm.resource_attr_id_b == target_resource_attr_id),
            and_(rm.resource_attr_id_a == target_resource_attr_id,
                 rm.resource_attr_id_b == source_resource_attr_id))).first()

    if mapping is None:
        raise ResourceNotFoundError("Mapping between %s and %s not found" %
                                    (source_resource_attr_id,
                                     target_resource_attr_id))

    #check scenarios exist
    s1 = _get_scenario(source_scenario_id, False, False)
    s2 = _get_scenario(target_scenario_id, False, False)

    rs = aliased(ResourceScenario, name='rs')
    rs1 = DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id,
                                     rs.scenario_id == source_scenario_id).first()
    rs2 = DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id,
                                     rs.scenario_id == target_scenario_id).first()

    #3 possibilities worth considering:
    #1: Both RS exist, so update the target RS
    #2: Target RS does not exist, so create it with the dataset from RS1
    #3: Source RS does not exist, so it must be removed from the target scenario if it exists
    return_value = None #Either return null or return a new or updated resource scenario
    if rs1 is not None:
        if rs2 is not None:
            log.info("Destination Resource Scenario exists. Updating dataset ID")
            rs2.dataset_id = rs1.dataset_id
        else:
            log.info("Destination has no data, so making a new Resource Scenario")
            rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id,
                                   scenario_id=target_scenario_id,
                                   dataset_id=rs1.dataset_id)
            DBSession.add(rs2)
        DBSession.flush()
        return_value = rs2
    else:
        log.info("Source Resource Scenario does not exist. Deleting destination Resource Scenario")
        if rs2 is not None:
            DBSession.delete(rs2)

    DBSession.flush()
    return return_value
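# Hedged usage sketch, not part of the original source. It assumes resource
# attributes 101 and 202 were previously linked with set_attribute_mapping()
# and that scenarios 1 and 2 exist; all IDs are hypothetical.
def _example_update_value_from_mapping():
    # If the source has a value, the target is updated (or created) and the
    # new or updated ResourceScenario is returned.
    rs = update_value_from_mapping(101, 202, 1, 2, user_id=1)
    if rs is not None:
        log.info("Target now points at dataset %s", rs.dataset_id)
    else:
        # The source had no value, so any target value was deleted.
        log.info("Target value removed")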
def add_perm(perm, **kwargs):
    """
        Add a new permission.
    """
    check_perm(kwargs.get('user_id'), 'add_perm')
    perm_i = Perm(perm_name=perm.name, perm_code=perm.code)
    DBSession.add(perm_i)
    DBSession.flush()
    return perm_i
def add_role(role, **kwargs):
    """
        Add a new role.
    """
    check_perm(kwargs.get('user_id'), 'add_role')
    role_i = Role(role_name=role.name, role_code=role.code)
    DBSession.add(role_i)
    DBSession.flush()
    return role_i
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('to_unit'). Conversion ALWAYS creates a NEW dataset, so the function
    returns the dataset ID of the new dataset.
    """
    ds_i = DBSession.query(Dataset).filter(Dataset.dataset_id == dataset_id).one()

    dataset_type = ds_i.data_type

    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    if old_unit is not None:
        if dataset_type == 'scalar':
            new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
        elif dataset_type == 'array':
            dim = array_dim(dsval)
            vecdata = arr_to_vector(dsval)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            new_val = vector_to_arr(newvec, dim)
        elif dataset_type == 'timeseries':
            new_val = []
            for ts_time, ts_val in dsval.items():
                dim = array_dim(ts_val)
                vecdata = arr_to_vector(ts_val)
                newvec = hydra_units.convert(vecdata, old_unit, to_unit)
                newarr = vector_to_arr(newvec, dim)
                new_val.append((ts_time, newarr))
        elif dataset_type == 'descriptor':
            raise HydraError('Cannot convert descriptor.')

        new_dataset = Dataset()
        new_dataset.data_units = to_unit
        new_dataset.set_val(dataset_type, new_val)
        new_dataset.data_dimen = ds_i.data_dimen
        new_dataset.data_name = ds_i.data_name
        new_dataset.data_type = ds_i.data_type
        new_dataset.hidden = 'N'
        new_dataset.set_metadata(ds_i.get_metadata_as_dict())
        new_dataset.set_hash()

        existing_ds = DBSession.query(Dataset).filter(
            Dataset.data_hash == new_dataset.data_hash).first()
        if existing_ds is not None:
            # An identical converted dataset already exists, so reuse it.
            DBSession.expunge_all()
            return existing_ds.dataset_id

        DBSession.add(new_dataset)
        DBSession.flush()

        return new_dataset.dataset_id
    else:
        raise HydraError('Dataset has no units.')
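# Hedged usage sketch, not part of the original source. Dataset ID 42 and the
# unit name 'MW' are hypothetical; the dataset must already carry a unit that
# the unit library can convert to 'MW'.
def _example_convert_dataset():
    new_dataset_id = convert_dataset(42, 'MW', user_id=1)
    # The source dataset is untouched; if an identical converted dataset
    # already exists (matched by hash), its ID is returned instead.
    log.info("Converted dataset stored as %s", new_dataset_id)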
def clone_dataset(dataset_id, **kwargs):
    """
        Clone a single dataset, by ID
    """
    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None

    dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset_id).options(joinedload_all('metadata')).first()

    if dataset is None:
        raise HydraError("Dataset %s does not exist." % (dataset_id))

    if dataset.created_by != user_id:
        owner = DBSession.query(DatasetOwner).filter(
            DatasetOwner.dataset_id == dataset_id,
            DatasetOwner.user_id == user_id).first()
        if owner is None:
            raise PermissionError("User %s is not an owner of dataset %s and therefore cannot clone it." % (user_id, dataset_id))

    DBSession.expunge(dataset)

    make_transient(dataset)

    dataset.data_name = dataset.data_name + "(Clone)"
    dataset.dataset_id = None
    dataset.cr_date = None

    #Avoid duplicate metadata entries if the entry has been cloned previously
    dataset.metadata = [m for m in dataset.metadata
                        if m.metadata_name not in ("clone_of", "cloned_by")]

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "clone_of"
    cloned_meta.metadata_val = str(dataset_id)
    dataset.metadata.append(cloned_meta)

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "cloned_by"
    cloned_meta.metadata_val = str(user_id)
    dataset.metadata.append(cloned_meta)

    dataset.set_hash()
    DBSession.add(dataset)
    DBSession.flush()

    cloned_dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id == dataset.dataset_id).first()

    return cloned_dataset
def add_dataset_collection(collection, **kwargs):
    """
        Add a new dataset collection, together with any initial datasets.
    """
    coln_i = DatasetCollection(collection_name=collection.name)

    for dataset_id in collection.dataset_ids:
        datasetitem = DatasetCollectionItem(dataset_id=dataset_id)
        coln_i.items.append(datasetitem)

    DBSession.add(coln_i)
    DBSession.flush()

    return coln_i
def set_role_perm(role_id, perm_id, **kwargs):
    """
        Attach a permission to a role.
    """
    check_perm(kwargs.get('user_id'), 'edit_perm')
    _get_perm(perm_id)
    _get_role(role_id)
    roleperm_i = RolePerm(role_id=role_id, perm_id=perm_id)
    DBSession.add(roleperm_i)
    DBSession.flush()
    return roleperm_i.role
def add_attributes(attrs, **kwargs):
    """
    Add a list of generic attributes, which can then be used in creating
    resource attributes, and put into types.

    .. code-block:: python

        (Attr){
            id = 1020
            name = "Test Attr"
            dimen = "very big"
        }

    """
    log.debug("Adding attributes: %s", [attr.name for attr in attrs])

    #Check to see if any of the attributes being added are already there.
    #If they are there already, don't add a new one. If an attribute
    #with the same name is there already but with a different dimension,
    #add a new attribute.
    all_attrs = DBSession.query(Attr).all()
    attr_dict = {}
    for attr in all_attrs:
        attr_dict[(attr.attr_name, attr.attr_dimen)] = attr

    attrs_to_add = []
    existing_attrs = []
    for potential_new_attr in attrs:
        if potential_new_attr.dimen is None or potential_new_attr.dimen.lower() == 'dimensionless':
            potential_new_attr.dimen = 'dimensionless'

        if attr_dict.get((potential_new_attr.name, potential_new_attr.dimen)) is None:
            attrs_to_add.append(potential_new_attr)
        else:
            existing_attrs.append(attr_dict.get((potential_new_attr.name, potential_new_attr.dimen)))

    new_attrs = []
    for attr in attrs_to_add:
        attr_i = Attr()
        attr_i.attr_name = attr.name
        attr_i.attr_dimen = attr.dimen
        attr_i.attr_description = attr.description
        DBSession.add(attr_i)
        new_attrs.append(attr_i)

    DBSession.flush()

    new_attrs = new_attrs + existing_attrs

    return new_attrs
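# Hedged usage sketch, not part of the original source, showing the
# deduplication rule: an attribute whose (name, dimension) pair is already in
# the DB is returned as-is, while a new pair is inserted. SimpleNamespace
# stands in for the complexmodel inputs; all values are hypothetical.
def _example_add_attributes():
    from types import SimpleNamespace

    attrs = [SimpleNamespace(name="flow", dimen="Volumetric flow rate", description=""),
             SimpleNamespace(name="flow", dimen="Volume", description="")]
    saved = add_attributes(attrs, user_id=1)
    # One Attr row per input, whether reused or newly created.
    assert len(saved) == 2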
def create_default_net():
    """Create network ID 1 (with a containing project and a default scenario) if it does not exist."""
    try:
        net = DBSession.query(Network).filter(Network.network_id == 1).one()
    except NoResultFound:
        project = Project(project_name="Project network")
        net = Network(network_name="Default network")
        scen = Scenario(scenario_name="Default network")
        project.networks.append(net)
        net.scenarios.append(scen)
        DBSession.add(net)
        DBSession.flush()
    return net
def add_resourcegroup(group, network_id, **kwargs):
    """
        Add a new group to a network.
    """
    group_i = ResourceGroup()
    group_i.group_name = group.name
    group_i.group_description = group.description
    group_i.status = group.status
    group_i.network_id = network_id

    DBSession.add(group_i)
    DBSession.flush()

    return group_i
def set_user_role(new_user_id, role_id, **kwargs):
    """
        Assign a role to a user.
    """
    check_perm(kwargs.get('user_id'), 'edit_role')
    try:
        _get_user(new_user_id)
        _get_role(role_id)
        roleuser_i = RoleUser(user_id=new_user_id, role_id=role_id)
        DBSession.add(roleuser_i)
        DBSession.flush()
    except Exception:  # Will occur if the foreign keys do not exist
        raise ResourceNotFoundError("User or Role does not exist")

    return roleuser_i.role
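# Hedged usage sketch, not part of the original source; user and role IDs are
# hypothetical, and the caller must hold the 'edit_role' permission.
def _example_set_user_role():
    role = set_user_role(new_user_id=12, role_id=3, user_id=1)
    log.info("User 12 now has role %s", role.role_code)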
def add_dataset(data_type, val, units, dimension, metadata=None, name="", user_id=None, flush=False):
    """
        Data can exist without scenarios. This is the mechanism whereby
        single pieces of data can be added without doing it through a scenario.

        A typical use of this would be for setting default values on types.
    """
    if metadata is None:
        metadata = {}

    d = Dataset()

    d.set_val(data_type, val)

    d.set_metadata(metadata)

    # Assign dimension if necessary
    if units is not None and dimension is None:
        dimension = hydra_units.get_unit_dimension(units)

    d.data_type = data_type
    d.data_units = units
    d.data_name = name
    d.data_dimen = dimension
    d.created_by = user_id
    d.data_hash = d.set_hash()

    try:
        existing_dataset = DBSession.query(Dataset).filter(
            Dataset.data_hash == d.data_hash).one()
        if existing_dataset.check_user(user_id):
            # An identical dataset the user can see already exists, so reuse it.
            d = existing_dataset
        else:
            # Same hash but not visible to this user: store a distinct copy.
            d.set_metadata({'created_at': datetime.datetime.now()})
            d.set_hash()
            DBSession.add(d)
    except NoResultFound:
        DBSession.add(d)

    if flush:
        DBSession.flush()

    return d
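# Hedged usage sketch, not part of the original source. The unit and value are
# illustrative; the dimension is derived from the unit when not given.
def _example_add_dataset():
    d = add_dataset('scalar', 42.0, 'm^3', None,
                    metadata={'source': 'example'},
                    name='storage default',
                    user_id=1,
                    flush=True)
    # If an identical dataset (matched by hash) that this user may see
    # already exists, it is returned instead of a new row.
    log.info("Dataset %s stored", d.dataset_id)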
def set_attribute_mapping(resource_attr_a, resource_attr_b, **kwargs):
    """
        Define one resource attribute from one network as being the same as
        that from another network.
    """
    user_id = kwargs.get('user_id')
    ra_1 = get_resource_attribute(resource_attr_a)
    ra_2 = get_resource_attribute(resource_attr_b)

    mapping = ResourceAttrMap(resource_attr_id_a=resource_attr_a,
                              resource_attr_id_b=resource_attr_b,
                              network_a_id=ra_1.get_network().network_id,
                              network_b_id=ra_2.get_network().network_id)

    DBSession.add(mapping)

    return mapping
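# Hedged usage sketch, not part of the original source. Resource attribute IDs
# 101 and 202 are hypothetical and must belong to (possibly different) networks.
def _example_set_attribute_mapping():
    mapping = set_attribute_mapping(101, 202, user_id=1)
    # The mapping records the two networks involved, so values can later be
    # pushed across with update_value_from_mapping().
    log.info("Mapped networks %s and %s",
             mapping.network_a_id, mapping.network_b_id)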
def add_resource_attr_collection(ctx, ra_collection):
    """
        Add a new resource attribute collection
    """
    ra_i = ResourceAttrCollection()
    ra_i.collection_name = ra_collection.name
    ra_i.layout = ra_collection.layout

    for ra_id in ra_collection.resource_attr_ids:
        item_i = ResourceAttrCollectionItem()
        item_i.resource_attr_id = ra_id
        ra_i.items.append(item_i)

    DBSession.add(ra_i)
    DBSession.flush()

    return HydraResourceAttrCollection(ra_i)
def add_dataset_to_collection(dataset_id, collection_id, **kwargs):
    """
        Add a single dataset to a dataset collection.
    """
    _get_collection(collection_id)
    collection_item = _get_collection_item(collection_id, dataset_id)
    if collection_item is not None:
        raise HydraError("Dataset Collection %s already contains dataset %s" %
                         (collection_id, dataset_id))

    new_item = DatasetCollectionItem()
    new_item.dataset_id = dataset_id
    new_item.collection_id = collection_id

    DBSession.add(new_item)
    DBSession.flush()

    return 'OK'
def add_note(note, **kwargs):
    """
        Add a new note
    """
    note_i = Note()
    note_i.ref_key = note.ref_key
    note_i.set_ref(note.ref_key, note.ref_id)
    note_i.note_text = note.text
    note_i.created_by = kwargs.get('user_id')

    DBSession.add(note_i)
    DBSession.flush()

    return note_i
def add_resourcegroupitem(group_item, scenario_id, **kwargs):
    """
        Add a single resource group item to a scenario, validating that the
        referenced node, link or group exists.
    """
    scenario._check_can_edit_scenario(scenario_id, kwargs['user_id'])

    #Check whether the ref_id is correct.
    if group_item.ref_key == 'NODE':
        try:
            DBSession.query(Node).filter(Node.node_id == group_item.ref_id).one()
        except NoResultFound:
            raise HydraError("Invalid ref ID %s for a Node group item!" % (group_item.ref_id))
    elif group_item.ref_key == 'LINK':
        try:
            DBSession.query(Link).filter(Link.link_id == group_item.ref_id).one()
        except NoResultFound:
            raise HydraError("Invalid ref ID %s for a Link group item!" % (group_item.ref_id))
    elif group_item.ref_key == 'GROUP':
        try:
            DBSession.query(ResourceGroup).filter(ResourceGroup.group_id == group_item.ref_id).one()
        except NoResultFound:
            raise HydraError("Invalid ref ID %s for a Group group item!" % (group_item.ref_id))
    else:
        raise HydraError("Invalid ref key: %s" % (group_item.ref_key))

    group_item_i = ResourceGroupItem()
    group_item_i.scenario_id = scenario_id
    group_item_i.group_id = group_item.group_id
    group_item_i.ref_key = group_item.ref_key
    if group_item.ref_key == 'NODE':
        group_item_i.node_id = group_item.ref_id
    elif group_item.ref_key == 'LINK':
        group_item_i.link_id = group_item.ref_id
    elif group_item.ref_key == 'GROUP':
        group_item_i.subgroup_id = group_item.ref_id

    DBSession.add(group_item_i)
    DBSession.flush()

    return group_item_i
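# Hedged usage sketch, not part of the original source. Group, node and
# scenario IDs are hypothetical; an invalid ref_id raises HydraError, and
# SimpleNamespace stands in for the group item complexmodel.
def _example_add_resourcegroupitem():
    from types import SimpleNamespace

    item = SimpleNamespace(ref_key='NODE', ref_id=5, group_id=2)
    item_i = add_resourcegroupitem(item, scenario_id=1, user_id=1)
    log.info("Node %s added to group %s", item_i.node_id, item_i.group_id)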
def add_items_to_attr_collection(ctx, collection_id, resource_attr_ids):
    """
        Add new items to a resource attribute collection
    """
    collection_i = DBSession.query(ResourceAttrCollection).filter(
        ResourceAttrCollection.collection_id == collection_id).first()
    if collection_i is None:
        raise HydraError("No collection with ID %s" % collection_id)

    for ra_id in resource_attr_ids:
        item_i = ResourceAttrCollectionItem()
        item_i.resource_attr_id = ra_id
        collection_i.items.append(item_i)

    DBSession.add(collection_i)
    DBSession.flush()

    return HydraResourceAttrCollection(collection_i)
def add_user(user, **kwargs):
    """
        Add a new user, failing if the username is already taken.
    """
    check_perm(kwargs.get('user_id'), 'add_user')
    u = User()

    u.username = user.username
    u.display_name = user.display_name

    user_id = _get_user_id(u.username)

    #If the user is already there, cannot add another with
    #the same username.
    if user_id is not None:
        raise HydraError("User %s already exists!" % user.username)

    u.password = bcrypt.hashpw(user.password.encode('utf-8'), bcrypt.gensalt())

    DBSession.add(u)
    DBSession.flush()

    return u
def copy_data_from_scenario(resource_attrs, source_scenario_id, target_scenario_id, **kwargs):
    """
        For a given list of resource attribute IDs, copy the dataset_ids from
        the resource scenarios in the source scenario to those in the target
        scenario.
    """
    #Get all the resource scenarios we wish to update
    target_resourcescenarios = DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id == target_scenario_id,
        ResourceScenario.resource_attr_id.in_(resource_attrs)).all()

    target_rs_dict = {}
    for target_rs in target_resourcescenarios:
        target_rs_dict[target_rs.resource_attr_id] = target_rs

    #get all the resource scenarios we are using to get our datasets source.
    source_resourcescenarios = DBSession.query(ResourceScenario).filter(
        ResourceScenario.scenario_id == source_scenario_id,
        ResourceScenario.resource_attr_id.in_(resource_attrs)).all()

    #If there is an RS in scenario 'source' but not in 'target', then create
    #a new one in 'target'
    for source_rs in source_resourcescenarios:
        target_rs = target_rs_dict.get(source_rs.resource_attr_id)
        if target_rs is not None:
            target_rs.dataset_id = source_rs.dataset_id
        else:
            target_rs = ResourceScenario()
            target_rs.scenario_id = target_scenario_id
            target_rs.dataset_id = source_rs.dataset_id
            target_rs.resource_attr_id = source_rs.resource_attr_id
            DBSession.add(target_rs)

    DBSession.flush()

    return target_resourcescenarios
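# Hedged usage sketch, not part of the original source; all IDs are
# hypothetical. Only the listed resource attributes are copied, and datasets
# are shared by ID rather than duplicated.
def _example_copy_data_from_scenario():
    updated = copy_data_from_scenario([101, 102, 103],
                                      source_scenario_id=1,
                                      target_scenario_id=2,
                                      user_id=1)
    log.info("%s resource scenarios updated in the target", len(updated))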
def add_rule(scenario_id, rule, **kwargs):
    """
        Add a rule, attached to a scenario and to one of a network, node,
        link or group.
    """
    rule_i = Rule()
    rule_i.ref_key = rule.ref_key
    if rule.ref_key == 'NETWORK':
        rule_i.network_id = rule.ref_id
    elif rule.ref_key == 'NODE':
        rule_i.node_id = rule.ref_id
    elif rule.ref_key == 'LINK':
        rule_i.link_id = rule.ref_id
    elif rule.ref_key == 'GROUP':
        rule_i.group_id = rule.ref_id
    else:
        raise HydraError("Ref Key %s not recognised." % rule.ref_key)

    rule_i.scenario_id = scenario_id
    rule_i.rule_name = rule.name
    rule_i.rule_description = rule.description
    rule_i.rule_text = rule.text

    DBSession.add(rule_i)
    DBSession.flush()

    return rule_i
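# Hedged usage sketch, not part of the original source. SimpleNamespace stands
# in for the rule complexmodel; the node ID and rule text are hypothetical.
def _example_add_rule():
    from types import SimpleNamespace

    rule = SimpleNamespace(ref_key='NODE',
                           ref_id=5,
                           name='min flow',
                           description='Minimum flow at node 5',
                           text='flow >= 10')
    rule_i = add_rule(scenario_id=1, rule=rule, user_id=1)
    log.info("Rule %s added", rule_i.rule_name)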
def _update_resourcescenario(scenario, resource_scenario, dataset=None, new=False, user_id=None, source=None):
    """
        Insert or update the value of a resource's attribute by first getting the
        resource, then parsing the input data, then assigning the value.

        returns a ResourceScenario object.
    """
    if scenario is None:
        scenario = DBSession.query(Scenario).filter(Scenario.scenario_id == 1).one()

    ra_id = resource_scenario.resource_attr_id

    log.debug("Assigning resource attribute: %s", ra_id)
    try:
        r_scen_i = DBSession.query(ResourceScenario).filter(
            ResourceScenario.scenario_id == scenario.scenario_id,
            ResourceScenario.resource_attr_id == ra_id).one()
    except NoResultFound:
        r_scen_i = ResourceScenario()
        r_scen_i.resource_attr_id = resource_scenario.resource_attr_id
        r_scen_i.scenario_id = scenario.scenario_id

        DBSession.add(r_scen_i)

    if scenario.locked == 'Y':
        log.info("Scenario %s is locked", scenario.scenario_id)
        return r_scen_i

    if dataset is not None:
        r_scen_i.dataset = dataset
        return r_scen_i

    dataset = resource_scenario.value

    value = dataset.parse_value()

    log.info("Assigning %s to resource attribute: %s", value, ra_id)

    if value is None:
        log.info("Cannot set data on resource attribute %s", ra_id)
        return None

    metadata = dataset.get_metadata_as_dict(source=source, user_id=user_id)

    dimension = dataset.dimension
    data_unit = dataset.unit

    # Assign dimension if necessary.
    # It happens that dimension is an empty string. We set it to
    # None to achieve consistency in the DB.
    if data_unit is not None and (dimension is None or len(dimension) == 0):
        dimension = hydra_units.get_unit_dimension(data_unit)
    elif dimension is None or len(dimension) == 0:
        dimension = None

    data_hash = dataset.get_hash(value, metadata)

    assign_value(r_scen_i,
                 dataset.type.lower(),
                 value,
                 data_unit,
                 dataset.name,
                 dataset.dimension,
                 metadata=metadata,
                 data_hash=data_hash,
                 user_id=user_id,
                 source=source)

    return r_scen_i
def clone_scenario(scenario_id, **kwargs):
    """
        Create a new scenario in the same network as an exact copy of an
        existing scenario, including its data and group items.
    """
    scen_i = _get_scenario(scenario_id)

    log.info("cloning scenario %s", scen_i.scenario_name)

    cloned_name = "%s (clone)" % (scen_i.scenario_name)

    existing_scenarios = DBSession.query(Scenario).filter(
        Scenario.network_id == scen_i.network_id).all()
    num_cloned_scenarios = 0
    for existing_scenario in existing_scenarios:
        if existing_scenario.scenario_name.find('clone') >= 0:
            num_cloned_scenarios = num_cloned_scenarios + 1

    if num_cloned_scenarios > 0:
        cloned_name = cloned_name + " %s" % (num_cloned_scenarios)

    log.info("Cloned scenario name is %s", cloned_name)

    cloned_scen = Scenario()
    cloned_scen.network_id = scen_i.network_id
    cloned_scen.scenario_name = cloned_name
    cloned_scen.scenario_description = scen_i.scenario_description
    cloned_scen.created_by = kwargs['user_id']

    cloned_scen.start_time = scen_i.start_time
    cloned_scen.end_time = scen_i.end_time
    cloned_scen.time_step = scen_i.time_step

    log.info("New scenario created")

    for rs in scen_i.resourcescenarios:
        new_rs = ResourceScenario()
        new_rs.resource_attr_id = rs.resource_attr_id
        new_rs.dataset_id = rs.dataset_id

        if kwargs.get('app_name') is None:
            new_rs.source = rs.source
        else:
            new_rs.source = kwargs['app_name']

        cloned_scen.resourcescenarios.append(new_rs)

    log.info("ResourceScenarios cloned")

    for resourcegroupitem_i in scen_i.resourcegroupitems:
        new_resourcegroupitem_i = ResourceGroupItem()
        new_resourcegroupitem_i.ref_key = resourcegroupitem_i.ref_key
        new_resourcegroupitem_i.link_id = resourcegroupitem_i.link_id
        new_resourcegroupitem_i.node_id = resourcegroupitem_i.node_id
        new_resourcegroupitem_i.subgroup_id = resourcegroupitem_i.subgroup_id
        new_resourcegroupitem_i.group_id = resourcegroupitem_i.group_id
        cloned_scen.resourcegroupitems.append(new_resourcegroupitem_i)

    log.info("Resource group items cloned.")

    DBSession.add(cloned_scen)
    DBSession.flush()

    log.info("Cloning finished.")

    return cloned_scen
def add_scenario(network_id, scenario, **kwargs):
    """
        Add a scenario to a specified network.
    """
    user_id = int(kwargs.get('user_id'))
    log.info("Adding scenarios to network")

    _check_network_ownership(network_id, user_id)

    existing_scen = DBSession.query(Scenario).filter(
        Scenario.scenario_name == scenario.name,
        Scenario.network_id == network_id).first()
    if existing_scen is not None:
        raise HydraError("Scenario with name %s already exists in network %s" %
                         (scenario.name, network_id))

    scen = Scenario()
    scen.scenario_name = scenario.name
    scen.scenario_description = scenario.description
    scen.layout = scenario.get_layout()
    scen.network_id = network_id
    scen.created_by = user_id
    scen.start_time = str(timestamp_to_ordinal(scenario.start_time)) if scenario.start_time else None
    scen.end_time = str(timestamp_to_ordinal(scenario.end_time)) if scenario.end_time else None
    scen.time_step = scenario.time_step

    #Just in case someone puts in a negative ID for the scenario.
    if scenario.id < 0:
        scenario.id = None

    if scenario.resourcescenarios is not None:
        #extract the data from each resourcescenario so it can all be
        #inserted in one go, rather than one at a time
        all_data = [r.value for r in scenario.resourcescenarios]

        datasets = data._bulk_insert_data(all_data, user_id=user_id)

        #record all the resource attribute ids
        resource_attr_ids = [r.resource_attr_id for r in scenario.resourcescenarios]

        #get all the resource scenarios into a list and bulk insert them
        for i, ra_id in enumerate(resource_attr_ids):
            rs_i = ResourceScenario()
            rs_i.resource_attr_id = ra_id
            rs_i.dataset_id = datasets[i].dataset_id
            rs_i.scenario_id = scen.scenario_id
            rs_i.dataset = datasets[i]
            scen.resourcescenarios.append(rs_i)

    if scenario.resourcegroupitems is not None:
        #Again doing bulk insert.
        for group_item in scenario.resourcegroupitems:
            group_item_i = ResourceGroupItem()
            group_item_i.scenario_id = scen.scenario_id
            group_item_i.group_id = group_item.group_id
            group_item_i.ref_key = group_item.ref_key
            if group_item.ref_key == 'NODE':
                group_item_i.node_id = group_item.ref_id
            elif group_item.ref_key == 'LINK':
                group_item_i.link_id = group_item.ref_id
            elif group_item.ref_key == 'GROUP':
                group_item_i.subgroup_id = group_item.ref_id
            scen.resourcegroupitems.append(group_item_i)

    DBSession.add(scen)
    DBSession.flush()

    return scen
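# Hedged usage sketch, not part of the original source. SimpleNamespace stands
# in for the scenario complexmodel; get_layout() is assumed to exist on it, so
# a minimal stub is provided. All IDs and values are hypothetical.
def _example_add_scenario():
    from types import SimpleNamespace

    scenario = SimpleNamespace(id=0,
                               name='baseline',
                               description='Example scenario',
                               start_time=None,
                               end_time=None,
                               time_step='1 day',
                               resourcescenarios=None,
                               resourcegroupitems=None,
                               get_layout=lambda: None)
    scen = add_scenario(network_id=1, scenario=scenario, user_id=1)
    log.info("Scenario %s added", scen.scenario_name)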
def create_default_users_and_perms():
    """Seed the database with the default permissions, roles and role-permission links."""
    perms = DBSession.query(Perm).all()
    if len(perms) > 0:
        return

    default_perms = (("add_user", "Add User"),
                     ("edit_user", "Edit User"),
                     ("add_role", "Add Role"),
                     ("edit_role", "Edit Role"),
                     ("add_perm", "Add Permission"),
                     ("edit_perm", "Edit Permission"),
                     ("add_network", "Add network"),
                     ("edit_network", "Edit network"),
                     ("delete_network", "Delete network"),
                     ("share_network", "Share network"),
                     ("edit_topology", "Edit network topology"),
                     ("add_project", "Add Project"),
                     ("edit_project", "Edit Project"),
                     ("delete_project", "Delete Project"),
                     ("share_project", "Share Project"),
                     ("edit_data", "Edit network data"),
                     ("view_data", "View network data"),
                     ("add_template", "Add Template"),
                     ("edit_template", "Edit Template"))

    default_roles = (("admin", "Administrator"),
                     ("dev", "Developer"),
                     ("modeller", "Modeller / Analyst"),
                     ("manager", "Manager"),
                     ("grad", "Graduate"),
                     ("developer", "Developer"),
                     ("decision", "Decision Maker"))

    roleperms = (('admin', "add_user"),
                 ('admin', "edit_user"),
                 ('admin', "add_role"),
                 ('admin', "edit_role"),
                 ('admin', "add_perm"),
                 ('admin', "edit_perm"),
                 ('admin', "add_network"),
                 ('admin', "edit_network"),
                 ('admin', "delete_network"),
                 ('admin', "share_network"),
                 ('admin', "add_project"),
                 ('admin', "edit_project"),
                 ('admin', "delete_project"),
                 ('admin', "share_project"),
                 ('admin', "edit_topology"),
                 ('admin', "edit_data"),
                 ('admin', "view_data"),
                 ('admin', "add_template"),
                 ('admin', "edit_template"),
                 ("developer", "add_network"),
                 ("developer", "edit_network"),
                 ("developer", "delete_network"),
                 ("developer", "share_network"),
                 ("developer", "add_project"),
                 ("developer", "edit_project"),
                 ("developer", "delete_project"),
                 ("developer", "share_project"),
                 ("developer", "edit_topology"),
                 ("developer", "edit_data"),
                 ("developer", "view_data"),
                 ("developer", "add_template"),
                 ("developer", "edit_template"),
                 ("modeller", "add_network"),
                 ("modeller", "edit_network"),
                 ("modeller", "delete_network"),
                 ("modeller", "share_network"),
                 ("modeller", "edit_topology"),
                 ("modeller", "add_project"),
                 ("modeller", "edit_project"),
                 ("modeller", "delete_project"),
                 ("modeller", "share_project"),
                 ("modeller", "edit_data"),
                 ("modeller", "view_data"),
                 ("manager", "edit_data"),
                 ("manager", "view_data"))

    perm_dict = {}
    for code, name in default_perms:
        perm = Perm(perm_code=code, perm_name=name)
        perm_dict[code] = perm
        DBSession.add(perm)

    role_dict = {}
    for code, name in default_roles:
        role = Role(role_code=code, role_name=name)
        role_dict[code] = role
        DBSession.add(role)

    for role_code, perm_code in roleperms:
        roleperm = RolePerm()
        roleperm.role = role_dict[role_code]
        roleperm.perm = perm_dict[perm_code]
        DBSession.add(roleperm)

    DBSession.flush()