def all_related_revisions(self):
    '''Returns chronological list of all object revisions related to
    this package. Includes PackageRevisions, PackageTagRevisions,
    PackageExtraRevisions and ResourceRevisions.
    @return List of tuples (revision, [list of object revisions of
    this revision])
    Ordered by most recent first.
    '''
    from tag import PackageTag
    from resource import ResourceGroup, Resource
    from package_extra import PackageExtra
    results = {}  # revision:[PackageRevision1, PackageTagRevision1, etc.]
    for pkg_rev in self.all_revisions:
        if pkg_rev.revision not in results:
            results[pkg_rev.revision] = []
        results[pkg_rev.revision].append(pkg_rev)
    for class_ in [ResourceGroup, Resource, PackageExtra, PackageTag]:
        rev_class = class_.__revision_class__
        if class_ == Resource:
            q = Session.query(rev_class).join('continuity',
                                              'resource_group')
            obj_revisions = q.filter(
                ResourceGroup.package_id == self.id).all()
        else:
            obj_revisions = Session.query(rev_class).filter_by(
                package_id=self.id).all()
        for obj_rev in obj_revisions:
            if obj_rev.revision not in results:
                results[obj_rev.revision] = []
            results[obj_rev.revision].append(obj_rev)
    result_list = results.items()
    # Sort by revision timestamp, most recent first (avoids the
    # deprecated dict.has_key and Python 2-only sorted(cmp=...) idioms).
    return sorted(result_list,
                  key=lambda rev_tuple: rev_tuple[0].timestamp,
                  reverse=True)
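# Illustrative consumer (not part of the original module): `pkg` is assumed to
# be an existing Package. Each tuple pairs a Revision with the object revisions
# it contains, newest revision first.
for revision, obj_revisions in pkg.all_related_revisions():
    print revision.timestamp, revision.message
    for obj_rev in obj_revisions:
        print '  %s' % obj_rev.__class__.__name__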
def load_word_info_from_feed(feed_url=None):
    '''Load word information (e.g. notes) from a set of entries supplied
    via an (atom) feed.

    @param feed_url: if not provided, use the value from config key
    "word_of_the_day.feed".
    '''
    if not feed_url:
        from pylons import config
        cfg_key = 'word_of_the_day.feed'
        feed_url = config.get(cfg_key, '')
        if not feed_url:
            msg = 'Need a feed_url - not specified in config (%s)' % cfg_key
            raise ValueError(msg)
    # do not make feedparser a global dependency
    import feedparser
    feed = feedparser.parse(feed_url)
    for idx, entry in enumerate(feed.entries):
        out = load_entry(entry)
        if idx == 0:
            word = out[0].object_id
            # update current wotd to the latest entry (the first one)
            KeyValue.upsert([u'config', u'word_of_the_day', u'current'],
                            value=word)
            Session.commit()
def add_relationship(self, type_, related_package, comment=u''):
    '''Creates a new relationship between this package and a
    related_package. It leaves the caller to commit the change.'''
    import package_relationship
    from ckan import model
    if type_ in package_relationship.PackageRelationship.get_forward_types():
        subject = self
        object_ = related_package
    elif type_ in package_relationship.PackageRelationship.get_reverse_types():
        type_ = package_relationship.PackageRelationship.\
            reverse_to_forward_type(type_)
        assert type_
        subject = related_package
        object_ = self
    else:
        raise KeyError('Package relationship type: %r' % type_)

    rels = self.get_relationships(with_package=related_package,
                                  type=type_, active=False,
                                  direction="forward")
    if rels:
        rel = rels[0]
        if comment:
            rel.comment = comment
        if rel.state == model.State.DELETED:
            rel.undelete()
    else:
        rel = package_relationship.PackageRelationship(
            subject=subject,
            object=object_,
            type=type_,
            comment=comment)
    Session.add(rel)
    return rel
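# Hypothetical call site (not in the original source): committing is left to
# the caller, so wrap the change in a revision and commit explicitly.
# `pkg` and `dependency` are assumed to be existing Package objects, and
# model.repo.new_revision() is assumed to be the vdm-style revision helper.
from ckan import model
rev = model.repo.new_revision()
rev.author = u'admin'
pkg.add_relationship(u'depends_on', dependency, comment=u'runtime dependency')
model.Session.commit()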
def diff(self, to_revision=None, from_revision=None):
    """Overrides the diff in vdm, so that related obj revisions are
    diffed as well as PackageRevisions"""
    from tag import PackageTag
    from resource import ResourceGroup, Resource
    from package_extra import PackageExtra

    results = {}  # field_name:diffs
    results.update(super(Package, self).diff(to_revision, from_revision))

    # Iterate over PackageTag, PackageExtra, Resources etc.
    for obj_class in [ResourceGroup, Resource, PackageExtra, PackageTag]:
        obj_rev_class = obj_class.__revision_class__

        # Query for object revisions related to this package
        if obj_class == Resource:
            obj_rev_query = (
                Session.query(obj_rev_class)
                .join("continuity", "resource_group")
                .join("revision")
                .filter(ResourceGroup.package_id == self.id)
                .order_by(Revision.timestamp.desc())
            )
        else:
            obj_rev_query = (
                Session.query(obj_rev_class)
                .filter_by(package_id=self.id)
                .join("revision")
                .order_by(Revision.timestamp.desc())
            )

        # Columns to include in the diff
        cols_to_diff = obj_class.revisioned_fields()
        cols_to_diff.remove("id")
        if obj_class is Resource:
            cols_to_diff.remove("resource_group_id")
        else:
            cols_to_diff.remove("package_id")
        # Particular object types are better known by an invariant field
        if obj_class is PackageTag:
            cols_to_diff.remove("tag_id")
        elif obj_class is PackageExtra:
            cols_to_diff.remove("key")

        # Iterate over each object ID
        # e.g. for PackageTag, iterate over Tag objects
        related_obj_ids = set(
            [related_obj.id for related_obj in obj_rev_query.all()])
        for related_obj_id in related_obj_ids:
            q = obj_rev_query.filter(obj_rev_class.id == related_obj_id)
            to_obj_rev, from_obj_rev = super(
                Package, self).get_obj_revisions_to_diff(
                    q, to_revision, from_revision)
            for col in cols_to_diff:
                values = [getattr(obj_rev, col) if obj_rev else ""
                          for obj_rev in (from_obj_rev, to_obj_rev)]
                value_diff = self._differ(*values)
                if value_diff:
                    if obj_class.__name__ == "PackageTag":
                        display_id = to_obj_rev.tag.name
                    elif obj_class.__name__ == "PackageExtra":
                        display_id = to_obj_rev.key
                    else:
                        display_id = related_obj_id[:4]
                    key = "%s-%s-%s" % (obj_class.__name__, display_id, col)
                    results[key] = value_diff
    return results
def init_model(engine):
    """Call me before using any of the tables or classes in the model"""
    sm = orm.sessionmaker(autoflush=True, autocommit=False, bind=engine)
    meta.engine = engine
    meta.Session = orm.scoped_session(sm)
    Session.configure(bind=engine)
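# Minimal wiring sketch (assumed, not original code): build the engine from a
# database URL and hand it to init_model() once at application start-up.
from sqlalchemy import create_engine
engine = create_engine('sqlite:///app.db')  # the URL here is a placeholder
init_model(engine)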
def init_configuration_data(self):
    '''Default configuration, for when CKAN is first used out of the box.
    This state may be subsequently configured by the user.'''
    init_authz_configuration_data()
    if meta.Session.query(Revision).count() == 0:
        rev = Revision()
        rev.author = 'system'
        rev.message = u'Initialising the Repository'
        Session.add(rev)
    self.commit_and_remove()
def find_by_id(cls, id, abort_404=True, published=True):
    if published:
        # It is unclear why this filter existed: events are published,
        # not schedules.
        # Original:
        # result = Session.query(Schedule).filter_by(id=id).\
        #     filter_by(published=published).first()
        result = Session.query(Schedule).filter_by(id=id).first()
    else:
        result = Session.query(Schedule).filter_by(id=id).first()

    if result is None and abort_404:
        abort(404, "No such Schedule")
    return result
def wrapper(*args, **kwargs):
    s = request.environ.get('beaker.session')
    user_id = s.get('user', 0)
    if user_id:
        session = Session()
        user = session.query(User).filter_by(id=user_id).first()
        request.user = user
    else:
        request.user = None
    request.result_status = {}
    body = callback(*args, **kwargs)
    return body
def find_by_date(cls, date, primary=False):
    start = datetime.combine(date, time.min)
    end = datetime.combine(date, time.max)
    if primary:
        return Session.query(TimeSlot).filter(
            TimeSlot.start_time.between(start, end)).filter(
                TimeSlot.primary == primary).order_by(
                    TimeSlot.start_time).all()
    else:
        return Session.query(TimeSlot).filter(
            TimeSlot.start_time.between(start, end)).order_by(
                TimeSlot.start_time).all()
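# Example use (assumed, not part of the original class): fetch today's primary
# time slots via the classmethod above.
from datetime import date
slots = TimeSlot.find_by_date(date.today(), primary=True)
for slot in slots:
    print slot.start_time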
def __init__(self):
    """Load initial application settings from database"""
    Application.__init__(self)
    # set up the database session
    database = 'sqlite:///%s' % os.path.join(
        APP_DIR,
        config.get('Inkcut', 'database_dir'),
        config.get('Inkcut', 'database_name'))
    log.info("Database: %s" % database)
    engine = create_engine(database)
    Session.configure(bind=engine)
    self.session = Session()
    self.job = None
    self._flags = {'block_callbacks': True}
def install():
    Base.metadata.create_all(Session().bind)

    data = [
        ("Chicago", "United States", ("60601", "60602", "60603", "60604")),
        ("Montreal", "Canada", ("H2S 3K9", "H2B 1V4", "H7G 2T8")),
        ("Edmonton", "Canada", ("T5J 1R9", "T5J 1Z4", "T5H 1P6")),
        ("New York", "United States",
         ("10001", "10002", "10003", "10004", "10005", "10006")),
        ("San Francisco", "United States",
         ("94102", "94103", "94104", "94105", "94107", "94108")),
    ]

    countries = {}
    all_post_codes = []
    for city, country, postcodes in data:
        try:
            country = countries[country]
        except KeyError:
            countries[country] = country = Country(country)

        city = City(city, country)
        pc = [PostalCode(code, city) for code in postcodes]
        Session.add_all(pc)
        all_post_codes.extend(pc)

    for i in xrange(1, 51):
        person = Person(
            "person %.2d" % i,
            Address(
                street="street %.2d" % i,
                postal_code=all_post_codes[
                    random.randint(0, len(all_post_codes) - 1)],
            ),
        )
        Session.add(person)

    Session.commit()

    # start the demo fresh
    Session.remove()
def find_by_url(cls, url, abort_404=True):
    result = Session.query(DbContent).filter_by(url=url).filter(
        DbContent.publish_timestamp <= datetime.datetime.now()).order_by(
            DbContent.publish_timestamp.desc()).first()
    if result is None and abort_404:
        abort(404, "No such db_content object")
    return result
def select_values(self):
    streams = Session.query(Stream).order_by(Stream.name).all()
    values = [(None, '(none)')]
    for stream in streams:
        values.append((stream.id, stream.name))
    return values
def find_all_by_type_id(cls, type_id, abort_404=True):
    result = Session.query(DbContent).filter_by(type_id=type_id).filter(
        DbContent.publish_timestamp <= datetime.datetime.now()).order_by(
            DbContent.publish_timestamp.desc()).all()
    # .all() returns a list (never None), so test for emptiness instead
    if not result and abort_404:
        abort(404, "No such db_content object")
    return result
def update_resources(self, res_dicts, autoflush=True):
    '''Change this package's resources.
    @param res_dicts - ordered list of dicts, each detailing a resource
    The resource dictionaries contain 'url', 'format' etc. Optionally
    they can also provide the 'id' of the Resource, to help match
    res_dicts to existing Resources. Otherwise, it searches for an
    otherwise exactly matching Resource.
    The caller is responsible for creating a revision and committing.'''
    from ckan import model
    assert isinstance(res_dicts, (list, tuple))
    # Map the incoming res_dicts (by index) to existing resources
    index_to_res = {}
    # Match up the res_dicts by id
    def get_resource_identity(resource_obj_or_dict):
        if isinstance(resource_obj_or_dict, dict):
            # Convert dict into a Resource object, since that ensures
            # all columns exist when you redictize it. This object is
            # garbage collected as it isn't added to the Session.
            res_keys = set(resource_obj_or_dict.keys()) - \
                set(('id', 'position'))
            res_dict = dict([(res_key, resource_obj_or_dict[res_key])
                             for res_key in res_keys])
            resource = model.Resource(**res_dict)
        else:
            resource = resource_obj_or_dict
        res_dict = resource.as_dict(core_columns_only=True)
        return res_dict
    existing_res_identities = [get_resource_identity(res)
                               for res in self.resources]
    for i, res_dict in enumerate(res_dicts):
        assert isinstance(res_dict, dict)
        id = res_dict.get('id')
        if id:
            res = Session.query(model.Resource).autoflush(autoflush).get(id)
            if res:
                index_to_res[i] = res
        else:
            res_identity = get_resource_identity(res_dict)
            try:
                matching_res_index = \
                    existing_res_identities.index(res_identity)
            except ValueError:
                continue
            index_to_res[i] = self.resources[matching_res_index]

    # Edit resources and create the new ones
    new_res_list = []

    for i, res_dict in enumerate(res_dicts):
        if i in index_to_res:
            res = index_to_res[i]
            for col in set(res_dict.keys()) - set(('id', 'position')):
                setattr(res, col, res_dict[col])
        else:
            # ignore particular keys that disrupt creation of new resource
            for key in set(res_dict.keys()) & set(('id', 'position')):
                del res_dict[key]
            res = model.Resource(**res_dict)
            model.Session.add(res)
        new_res_list.append(res)
    self.resource_groups[0].resources = new_res_list
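# Hypothetical call site: as the docstring says, the caller owns the revision
# and the commit. `pkg` and model.repo.new_revision() are assumed from the
# surrounding CKAN model; the dict keys follow the docstring above.
from ckan import model
model.repo.new_revision()
pkg.update_resources([{'url': u'http://example.com/data.csv',
                       'format': u'CSV',
                       'description': u'Monthly dump'}])
model.Session.commit()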
def get_entities(entity_class, ids, order=True):
    '''
    Return all entities of the type *entity_class* where id is in *ids*.

    *entity_class*
        A SQLAlchemy model class.
    *ids* (list of int)
        A list of ids.
    *order* (boolean)
        Return the entities in the same order as *ids* (default: True)

    Returns
        A list of model objects
    '''
    if ids == []:
        return []
    from meta import Session
    db_mapper_attr = ref_attr_value(entity_class)
    q = Session.query(entity_class).filter(db_mapper_attr.in_(ids))

    if not order:
        return q.all()

    # order == True: get and order the results
    all_map = dict((str(ref_attr_value(entity)), entity)
                   for entity in q.all())
    ordered_results = []
    for id_ in ids:
        entity = all_map.get(str(id_))
        if entity is not None:
            ordered_results.append(entity)
    return ordered_results
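# Sketch of a call site (names are placeholders, not from the original file):
# ids come from an external ranking, and order=True returns the entities in
# that same ranked order; ids with no matching row are silently skipped.
ranked_ids = [42, 7, 19]
entities = get_entities(SomeModel, ranked_ids)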
def find_next_proposal(cls, id, type_id, signed_in_person_id):
    withdrawn = ProposalStatus.find_by_name('Withdrawn')
    next = Session.query(Proposal).from_statement("""
        SELECT p.id
        FROM (SELECT id
              FROM proposal
              WHERE id <> %d
                AND status_id <> %d
                AND proposal_type_id = %d
              EXCEPT
              SELECT proposal_id AS id
              FROM review
              WHERE review.reviewer_id = %d) AS p
        LEFT JOIN review AS r ON (p.id = r.proposal_id)
        GROUP BY p.id
        ORDER BY COUNT(r.reviewer_id), RANDOM()
        LIMIT 1
    """ % (id, withdrawn.id, type_id, signed_in_person_id))
    next = next.first()
    if next is not None:
        return next.id
    else:
        # looks like you've reviewed everything!
        return None
def find_accepted_by_id(cls, id):
    # status = ProposalStatus.find_by_name('Accepted')
    # result = Session.query(Proposal).filter_by(id=id, status_id=status.id)
    # Optimisation: assume that the ProposalStatus with id=1 is 'Accepted'
    result = Session.query(Proposal).filter_by(id=id, status_id=1).one()
    return result
def find_all_tiered(cls):
    sponsors = Session.query(Sponsor).order_by(Sponsor.weight).all()
    tiers = {}
    for tier in sponsor_tiers:
        tiers[tier] = [sponsor for sponsor in sponsors
                       if sponsor.tier == tier]
    return tiers
def find_next_proposal(cls, id, type_id, signed_in_person_id):
    withdrawn = FundingStatus.find_by_name('Withdrawn')
    next = Session.query(Funding).from_statement("""
        SELECT f.id
        FROM (SELECT id
              FROM funding
              WHERE id <> %d
                AND status_id <> %d
                AND funding_type_id = %d
              EXCEPT
              SELECT funding_id AS id
              FROM funding_review
              WHERE funding_review.reviewer_id = %d) AS f
        LEFT JOIN funding_review AS r ON (f.id = r.funding_id)
        GROUP BY f.id
        ORDER BY COUNT(r.reviewer_id), RANDOM()
        LIMIT 1
    """ % (id, withdrawn.id, type_id, signed_in_person_id))
    next = next.first()
    if next is not None:
        return next.id
    else:
        # looks like you've reviewed everything!
        return None
def get_relationships(self, with_package=None, type=None, active=True,
                      direction='both'):
    '''Returns relationships this package has.
    Keeps stored type/ordering (not from pov of self).'''
    assert direction in ('both', 'forward', 'reverse')
    if with_package:
        assert isinstance(with_package, Package)
    from package_relationship import PackageRelationship
    forward_filters = [PackageRelationship.subject == self]
    reverse_filters = [PackageRelationship.object == self]
    if with_package:
        forward_filters.append(PackageRelationship.object == with_package)
        reverse_filters.append(PackageRelationship.subject == with_package)
    if active:
        forward_filters.append(PackageRelationship.state == State.ACTIVE)
        reverse_filters.append(PackageRelationship.state == State.ACTIVE)
    if type:
        forward_filters.append(PackageRelationship.type == type)
        reverse_type = PackageRelationship.reverse_type(type)
        reverse_filters.append(PackageRelationship.type == reverse_type)
    q = Session.query(PackageRelationship)
    if direction == 'both':
        q = q.filter(or_(
            and_(*forward_filters),
            and_(*reverse_filters),
        ))
    elif direction == 'forward':
        q = q.filter(and_(*forward_filters))
    elif direction == 'reverse':
        q = q.filter(and_(*reverse_filters))
    return q.all()
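# Hedged usage sketch (not from the original file): fetch only the active,
# forward relationships between two known packages; `pkg` and `other_pkg`
# are assumed to be existing Package objects.
rels = pkg.get_relationships(with_package=other_pkg, type=u'depends_on',
                             direction='forward')
for rel in rels:
    print rel.type, rel.comment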
def getEdgesDegree2ByNxMG(self):
    MG = nx.MultiGraph()
    networkMG = Edge_data
    queryMG = Session.query(networkMG)
    Session.close()
    for res in queryMG:
        ed = res.compute_results(['edge_id', 'start_node', 'end_node'])
        MG.add_edge(ed['start_node'], ed['end_node'], eid=ed['edge_id'])
    nodes = MG.nodes()
    for node in nodes:
        if MG.degree(node) == 2:
            edge1, edge2 = MG.edges(node)
            ed1 = MG.get_edge_data(*edge1)[0]['eid']
            ed2 = MG.get_edge_data(*edge2)[0]['eid']
            # print '%s / %s / %s' % (node, ed1, ed2)
            yield (ed1, ed2)
def __init__(self):
    """Load initial application settings from database"""
    # set up the database session
    engine = create_engine(
        'sqlite:///%s' % os.path.join(APP_DIR,
                                      config.get('Inkcut', 'database_dir'),
                                      config.get('Inkcut', 'database_name')))
    Session.configure(bind=engine)
    self.session = Session()
    self.job = None
    self.ui = {
        'main_window': MainWindow(self),
        'device_dialog': DeviceDialog(self),
    }
    self.statusbar = self.ui['main_window'].widgets['statusbar']
def find_scheduled_by_date_and_type(cls, date, event_type):
    from schedule import Schedule
    from event import Event
    from time_slot import TimeSlot
    start = datetime.combine(date, time(0, 0, 0))
    end = datetime.combine(date, time(23, 59, 59))
    return Session.query(Location).join(Schedule).join(Event).\
        join(TimeSlot).\
        filter(Event.type == event_type).\
        filter(TimeSlot.start_time.between(start, end)).\
        order_by(Location.display_order).all()
def __init__(self):
    """Database loading."""
    self.sites = []  # list with all Site objects
    self.logger = Logger("Manager")
    self.session = Session()
    self.sites = self.session.query(Site).all()  # loading all sites
def __init__(self, column_name):
    self.bc = None
    self.G = nx.Graph()
    self.network = Network
    self.column_name = column_name
    self.query = Session.query(Network)
    self.ComputeBC()
    self.UpdateTable()
def get(cls, reference):
    '''Returns a package object referenced by its id or name.'''
    query = Session.query(cls).filter(cls.id == reference)
    query = query.options(eagerload_all('package_tags.tag'))
    query = query.options(eagerload_all('resource_groups_all.resources_all'))
    pkg = query.first()
    if pkg is None:
        pkg = cls.by_name(reference)
    return pkg
def get(cls, id_or_name):
    """Return a Vocabulary object referenced by its id or name, or None
    if there is no vocabulary with the given id or name.
    """
    query = Session.query(Vocabulary).filter(Vocabulary.id == id_or_name)
    vocab = query.first()
    if vocab is None:
        vocab = Vocabulary.by_name(id_or_name)
    return vocab
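# Example (the vocabulary name is a placeholder): get() accepts either an id
# or a name, so both of these resolve to the same Vocabulary if it exists,
# or None otherwise.
vocab = Vocabulary.get(u'genre')
same_vocab = Vocabulary.get(vocab.id) if vocab else None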
def find_all_by_proposal_type_id(cls, id, abort_404=True,
                                 include_withdrawn=True):
    result = Session.query(Proposal).filter_by(proposal_type_id=id)
    if not include_withdrawn:
        withdrawn = ProposalStatus.find_by_name('Withdrawn')
        result = result.filter(Proposal.status_id != withdrawn.id)

    result = result.all()
    # .all() returns a list (never None), so test for emptiness instead
    if not result and abort_404:
        abort(404, "No such proposal object")
    return result
def schedule_by_time_slot(self, time_slot):
    from location import Location
    return (
        Session.query(Schedule)
        .filter(Schedule.event == self)
        .filter(Schedule.time_slot == time_slot)
        .order_by(Schedule.overflow)
        .all()
    )
def load_entry(entry):
    '''Load a feedparser entry into KeyValue objects.

    @return: list of KeyValue objects created.
    '''
    name = entry.title.lower().strip()
    # may be of form "Word of the Day: Baker"
    if ':' in name:
        name = name.split(':')[1].strip()
    ns = u'word'
    objid = name
    notes = entry.content[0]['value']
    key = u'notes'
    # upsert, since a plain constructor does not work here:
    # kv = KeyValue(ns=ns, object_id=objid, key=key, value=notes)
    kv = KeyValue.upsert([ns, objid, key], value=notes)
    Session.commit()
    return [kv]
def find_by_proposal_reviewer(cls, proposal_id, reviewer_id, abort_404=True):
    result = Session.query(Review).filter_by(
        proposal_id=proposal_id).filter_by(
            reviewer_id=reviewer_id).first()
    if result is None and abort_404:
        abort(404, "No such review object")
    return result
def get(cls, key, category=default_category):
    """Get an entry from the config key store."""
    fetch = Session.query(cls).get((category, key))
    if not fetch:
        log.warning("Config request for missing key: %s, %s", category, key)
    # Missing entries are returned as an empty string, which is the least
    # surprising fallback when the value is exposed directly to the user.
    return fetch.value if fetch else ""
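# Hypothetical call (the key name is a placeholder, and the enclosing class is
# assumed to be named Config): a missing key logs a warning and comes back as
# '' rather than raising.
site_title = Config.get(u'site_title')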
def find_all_by_funding_type_id(cls, id, abort_404=True,
                                include_withdrawn=True):
    result = Session.query(Funding).filter_by(funding_type_id=id)
    if not include_withdrawn:
        withdrawn = FundingStatus.find_by_name('Withdrawn')
        result = result.filter(Funding.status_id != withdrawn.id)

    result = result.all()
    # .all() returns a list (never None), so test for emptiness instead
    if not result and abort_404:
        abort(404, "No such funding object")
    return result
def delete_all(self):
    # bulk-delete every table's rows, then commit and discard the session
    for obj in [Posting, Transaction, Account, Slice, KeyValue,
                EnumerationValue, Key]:
        Session.query(obj).delete()
    Session.commit()
    Session.remove()
def find_by_date(cls, date, primary=False):
    from time_slot import TimeSlot
    start = datetime.combine(date, time.min)
    end = datetime.combine(date, time.max)
    return (
        Session.query(Schedule)
        .options(
            sa.orm.eagerload_all("time_slot.schedule"),
            sa.orm.eagerload("location"),
            sa.orm.eagerload_all("event.proposal.people"),
        )
        .join(TimeSlot)
        .filter(TimeSlot.start_time.between(start, end))
        .order_by(TimeSlot.start_time)
        .all()
    )
def load_name_range(start, end, invalidate=False):
    """Load Person objects on a range of names.

    start/end are integers; the range is then "person <start>" -
    "person <end>".

    The cache option we set up is called "name_range", indicating a range
    of names for the Person class.

    The `Person.addresses` collections are also cached. It's basically
    another level of tuning here, as that particular cache option can be
    transparently replaced with joinedload(Person.addresses). The effect
    is that each Person and his/her Address collection is cached either
    together or separately, affecting the kind of SQL that emits for
    unloaded Person objects as well as the distribution of data within
    the cache.
    """
    q = Session.query(Person).\
        filter(Person.name.between("person %.2d" % start,
                                   "person %.2d" % end)).\
        options(cache_address_bits).\
        options(FromCache("default", "name_range"))

    # have the "addresses" collection cached separately
    # each lazyload of Person.addresses loads from cache.
    q = q.options(RelationshipCache("default", "by_person",
                                    Person.addresses))

    # alternatively, eagerly load the "addresses" collection, so that
    # they'd be cached together. This issues a bigger SQL statement and
    # caches a single, larger value in the cache per person rather than
    # two separate ones.
    # q = q.options(joinedload(Person.addresses))

    # if requested, invalidate the cache on current criterion.
    if invalidate:
        q.invalidate()

    return q.all()
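# Usage sketch (the parameters are illustrative): load a slice of people from
# the cache, then force a refresh of the same range by invalidating first.
people = load_name_range(10, 20)
refreshed = load_name_range(10, 20, invalidate=True)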
def find_by_id(self, id):
    return Session.query(Travel).get(id)