def _persist_schema(self, schema, for_, s):
    """Persist a new schema (updates are not supported).

    Copies the incoming XML-RPC schema object into new ORM ``Schema`` /
    ``SchemaAttribute`` rows, links and validates parent schemas, and
    delegates final storage to ``self._persist``.

    :param schema: incoming schema description (must have ``id`` of None)
    :param for_: the schema type tag to store (e.g. what object kind it applies to)
    :param s: active SQLAlchemy session
    :returns: the persisted ORM schema object
    :raises PersistenceError: on update attempts, missing parents,
        parent type mismatch, or duplicate attribute names
    """
    if schema.id is not None:
        raise PersistenceError("Updates are not supported for Schemas")

    # Copy each incoming attribute definition into a fresh ORM row.
    attrs = []
    for attr in schema.attrs.values():
        new_attr = SchemaAttribute()
        new_attr.kind = attr.__xmlrpc_class__
        new_attr.name = attr.name
        new_attr.description = attr.description
        new_attr.units = attr.units
        attrs.append(new_attr)

    parents = list(schema.extends)
    schema_ = Schema()
    schema_.name = schema.name
    schema_.attributes = attrs
    # Set foreign keys
    if parents:
        # Track every attribute name seen (own + inherited) to reject duplicates.
        attributes = [attr.name for attr in schema_.attributes]
        db_parents = s.query(Schema).filter(Schema.id.in_(parents)).all()
        if len(db_parents) != len(parents):
            raise PersistenceError("Could not find all parents")
        # Check parents are of the correct type: a parent must either be a
        # generic "schema" or match the type being ingested.
        for parent in db_parents:
            if parent.for_ != "schema" and parent.for_ != for_:
                raise PersistenceError(
                    "Parent %d of different type to ingested schema" % (parent.id))
            for parent_attr in parent.attributes:
                if parent_attr.name in attributes:
                    raise PersistenceError(
                        "Duplicate attribute definition %s from parent %d" %
                        (parent_attr.name, parent.id))
                attributes.append(parent_attr.name)
            schema_.extends.append(parent)

    # Set the schema type
    schema_.for_ = for_
    # Persist now to get an ID
    ret = self._persist(schema_, s)
    # If the repo has a method to persist the schema then call it and record
    # the output, re-persisting to store the repository_id.
    fn = find_method(self.repo, "persist", "schema")
    if fn is not None:
        schema_.repository_id = fn(schema_)
        return self._persist(schema_, s)
    return ret
def persist(self, obj, cwd=None):
    """The main entry point for persistence. Handles sessions.

    Dispatches to a ``persist_<class>`` handler located via
    ``find_method``, wrapping the call in a session that is committed on
    success and always closed.

    :param obj: object to persist; its ``__xmlrpc_class__`` selects the handler
    :param cwd: working directory for any file-backed content (may be None)
    :returns: the handler's persisted result
    :raises ValueError: if no persist handler exists for the object's class
    """
    cls = obj.__xmlrpc_class__
    fn = find_method(self, "persist", cls)
    # Guard clause: fail fast for unsupported classes instead of nesting
    # the whole session block under the check.
    if fn is None:
        raise ValueError("%s not supported" % (cls))
    s = orm.sessionmaker(bind=self.engine)()
    try:
        obj = fn(obj, s, cwd)
        s.commit()
        return obj
    finally:
        # Closing an uncommitted session discards any pending changes.
        s.close()
def persist_location(self, location, session, cwd):
    """Persist a location, either inserting a new row or updating an
    existing one with optimistic version checking.

    :param location: incoming location object; ``id`` of None means insert
    :param session: active SQLAlchemy session
    :param cwd: working directory (unused here, kept for handler signature)
    :returns: the persisted ORM location
    :raises StaleObjectError: if the id/version pair no longer matches a row
    """
    loc = Location()
    if location.id is not None:
        try:
            # Optimistic locking: the row must still be at the caller's version.
            loc = session.query(Location).filter(
                Location.id == location.id,
                Location.version == location.version).one()
            self.save_version(session, loc)
        except NoResultFound:
            raise StaleObjectError(
                "No location with id=%d and version=%d to update" %
                (location.id, location.version))
    # Bump the version; brand-new objects start at 1.
    loc.version = location.version + 1 if location.version is not None else 1
    copy_attrs(location, loc, ["id", "name", "latitude", "longitude", "elevation"])
    # If the repo has a method to persist the location then call it and
    # record the output.
    fn = find_method(self.repo, "persist", "location")
    if fn is not None:
        loc.repository_id = fn(loc)
    return self._persist(loc, session)
def commit(self, unit, cwd):
    """Commit a unit of work with file objects based in cwd.

    This method will alter the unit and its contents, and returns
    a list of the new persisted objects.

    Objects in the unit may carry negative placeholder IDs; as each one
    is persisted its real ID is recorded (in locs/schemas/datasets) so
    later objects that reference the placeholder can be rewritten to the
    real ID before they are persisted themselves. The original
    (placeholder) id is kept on each result as ``correlationid`` so the
    caller can match results back to its inputs.
    """
    s = orm.sessionmaker(bind=self.engine)()
    ret = []
    # Placeholder-id -> persisted-id maps, one per referenceable kind.
    locs = {}
    schemas = {}
    datasets = {}
    try:
        # NOTE(review): positional sort argument is the Python 2 cmp-style
        # comparator — ingest_order presumably orders by dependency; confirm.
        unit._to_insert.sort(ingest_order)
        unit._to_update.sort(ingest_order)
        unit._to_insert = sort_datasets(unit._to_insert)
        # delete first
        # now sort to find objects by order of dependency (location then dataset)
        for unit_list in (unit._to_insert, unit._to_update):
            for obj in unit_list:
                # Remember the incoming id (possibly a negative placeholder)
                # before clearing it for insert.
                oid = obj.id
                if obj.id < 0:
                    obj.id = None
                cls = obj.__xmlrpc_class__
                if cls == "dataset":
                    # Rewrite placeholder references to already-persisted ids.
                    if obj.location < 0:
                        obj.location = locs[obj.location]
                    if obj.schema < 0:
                        obj.schema = schemas[obj.schema]
                    if obj.data_source != None and isinstance(
                            obj.data_source, DatasetDataSource
                    ) and obj.data_source.dataset_id < 0:
                        obj.data_source.dataset_id = datasets[
                            obj.data_source.dataset_id]
                elif cls.endswith("schema"):
                    # Parent schemas may also be placeholders.
                    obj.extends = [
                        schemas[p_id] if p_id < 0 else p_id
                        for p_id in obj.extends
                    ]
                fn = find_method(self, "persist", cls)
                if fn == None:
                    raise ValueError("Could not find method for", "persist", cls)
                obj = fn(obj, s, cwd)
                # Record the freshly assigned id under the placeholder key.
                if cls == "location":
                    locs[oid] = obj.id
                elif cls.endswith("schema"):
                    schemas[oid] = obj.id
                elif cls == "dataset":
                    datasets[oid] = obj.id
                obj.correlationid = oid
                ret.append(obj)
        for obj_id in unit._to_enable:
            self.enable_dataset(obj_id)
        for obj_id in unit._to_disable:
            self.disable_dataset(obj_id)
        s.commit()
        return ret
    except Exception as e:
        # Any failure undoes the entire unit of work.
        s.rollback()
        raise
    finally:
        s.close()
ds.data_source.sampling.parameters, SamplingParameter) # # Clean up the sampling link # if ds.sampling == None and sampling != None: # ds.sampling = Sampling() # elif ds.sampling != None and sampling == None: # del ds.sampling # # If the sampling object actually exists then populate it # if ds.sampling != None: # ds.sampling.kind = sampling["class"] # del sampling["class"] # merge_parameters(sampling, ds.sampling.parameters, SamplingParameter) # If the repo has a method to persist the dataset then call it and record the output fn = find_method(self.repo, "persist", "dataset") if fn != None: ds.repository_id = fn(ds, schema, location) self._persist(ds, session) return self._get_dataset(ds.id, session) @method("persist", "region") def persist_region(self, region, session, cwd): points = list(region.region_points) reg = Region() if region.id != None: try: reg = session.query(Region).filter( Region.id == region.id, Region.version == region.version).one()