def is_class_sa_mapped(klass):
    """Return True if *klass* is mapped by the SQLAlchemy ORM.

    Bug fix: ``class_mapper`` signals an *unmapped class* with
    ``UnmappedClassError`` (``UnmappedInstanceError`` is raised only when
    an instance is passed instead of a class), so both must be caught for
    this probe to return False instead of propagating.
    """
    # Local import keeps the fix self-contained; the module already
    # depends on SQLAlchemy for class_mapper/UnmappedInstanceError.
    from sqlalchemy.orm.exc import UnmappedClassError
    try:
        class_mapper(klass)
    except (UnmappedClassError, UnmappedInstanceError):
        return False
    return True
def get_models(self):
    """Return a dict mapping model names to their URLs.

    ``self.model`` is either a list of model classes or a module-like
    object whose public attributes are scanned; Mongo ``Document``
    subclasses and SQLAlchemy-mapped classes are both included.
    """
    models = {}
    if isinstance(self.model, list):
        for model in self.model:
            key = model.__name__
            models[key] = model_url(self.collection_name, model_name=key)
    else:
        for key, obj in self.model.__dict__.iteritems():
            if key.startswith("_"):
                continue
            if Document is not None:
                # issubclass raises TypeError for non-classes; any failure
                # means "not a Document".  (Bug fix: the original bare
                # except also swallowed SystemExit/KeyboardInterrupt.)
                try:
                    if issubclass(obj, Document):
                        models[key] = model_url(self.collection_name, model_name=key)
                        continue
                except Exception:
                    pass
            try:
                class_mapper(obj)
            except Exception:
                # Not an SQLAlchemy-mapped class.
                continue
            if not isinstance(obj, type):
                continue
            models[key] = model_url(self.collection_name, model_name=key)
    return models
def copy_model_object(cls, model, found=None):
    """Return a copy of *model* with primary keys left unset.

    :param model: mapped object to copy.
    :param found: internal accumulator used on recursive calls; tracks
        already-visited relationship properties so that two entities that
        reference each other do not cause infinite recursion.
    :return: a new, unsaved instance of ``model.__class__``.
    """
    if found is None:
        found = []
    # Fresh instance of the same mapped class; PK columns are skipped so
    # the copy will be treated as a new row on flush.
    result = model.__class__()
    pk_keys = set([c.key for c in class_mapper(model.__class__).primary_key])
    for p in class_mapper(model.__class__).columns:
        if p.key not in pk_keys:
            result.__setattr__(p.key, model.__getattribute__(p.key))
    # Recursively copy related objects; `found` records visited
    # relationship properties to break reference cycles.
    for name, relation in class_mapper(model.__class__).relationships.items():
        if relation not in found:
            found.append(relation)
            if relation.uselist:
                for child in model.__getattribute__(name):
                    result.__getattribute__(name).append(cls.copy_model_object(child, found))
            else:
                # NOTE(review): assumes the scalar relation is non-None —
                # a None value here would crash in the recursive call.
                result.__setattr__(name, cls.copy_model_object(model.__getattribute__(name), found))
    return result
def models(self, **kwargs):
    """Models index page: map viewable model names to their URLs.

    Returns the raw dict when ``json`` is truthy in *kwargs*, otherwise
    renders the template.
    """
    request = self.request
    models = {}
    if isinstance(request.models, list):
        for model in request.models:
            if has_permission('view', model, request):
                key = model.__name__
                models[key] = request.fa_url(key, request.format)
    else:
        for key, obj in request.models.__dict__.iteritems():
            if key.startswith('_'):
                continue
            if Document is not None:
                # Bug fix: the bare except also swallowed SystemExit /
                # KeyboardInterrupt; issubclass raises TypeError here for
                # non-classes.
                try:
                    if issubclass(obj, Document):
                        if has_permission('view', obj, request):
                            models[key] = request.fa_url(key, request.format)
                        continue
                except Exception:
                    pass
            try:
                class_mapper(obj)
            except Exception:
                # Not an SQLAlchemy-mapped class.
                continue
            if not isinstance(obj, type):
                continue
            if has_permission('view', obj, request):
                models[key] = request.fa_url(key, request.format)
    if kwargs.get('json'):
        return models
    return self.render(models=models)
def test_mapper_args_declared_attr_two(self):
    # same as test_mapper_args_declared_attr, but we repeat
    # ComputedMapperArgs on both classes for no apparent reason.

    class ComputedMapperArgs:
        @declared_attr
        def __mapper_args__(cls):
            # The hierarchy root carries polymorphic_on; every subclass
            # gets a polymorphic_identity equal to its class name.
            if cls.__name__ == 'Person':
                return {'polymorphic_on': cls.discriminator}
            else:
                return {'polymorphic_identity': cls.__name__}

    class Person(Base, ComputedMapperArgs):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True)
        discriminator = Column('type', String(50))

    class Engineer(Person, ComputedMapperArgs):
        pass

    configure_mappers()
    # Repeating the mixin on Engineer must not disturb either setting.
    assert class_mapper(Person).polymorphic_on is Person.__table__.c.type
    eq_(class_mapper(Engineer).polymorphic_identity, 'Engineer')
def get_trigger_tables():
    """Determines which tables need to have triggers set on them.

    Returns a dictionary of table names (key) with a dictionary (value)
    that provides additional information about a table:

    * list of primary keys for each table.
    * whether it's an entity table
    """
    tables = collections.OrderedDict()
    # mapping of table names to their models and their "kind" (direct or not)
    for entity in SCHEMA.values():
        # The entity table itself is always "direct".
        entity_mapper = class_mapper(entity.model)
        tables[entity_mapper.mapped_table.name] = {
            "model": entity.model,
            "is_direct": True,
            "has_gid": entity_mapper.has_property('gid'),
        }
        # Tables that contain the referenced column
        # TODO(roman): maybe come up with a better description above
        triggered_paths = [path
                           for field in entity.fields
                           for path in field.paths
                           if field.trigger]
        for path in unique_split_paths(triggered_paths):
            model = last_model_in_path(entity.model, path)
            if model is None:
                continue
            name = class_mapper(model).mapped_table.name
            tables.setdefault(name, {"model": model, "is_direct": False})
    return tables
def test_non_propagating_mixin(self):
    class NoJoinedTableNameMixin:
        # Returning None from __tablename__ makes an inheriting class
        # share its parent's table (single-table inheritance) instead of
        # creating a joined table.
        @declared_attr
        def __tablename__(cls):
            if decl.has_inherited_table(cls):
                return None
            return cls.__name__.lower()

    class BaseType(Base, NoJoinedTableNameMixin):
        discriminator = Column('type', String(50))
        __mapper_args__ = dict(polymorphic_on=discriminator)
        id = Column(Integer, primary_key=True)
        value = Column(Integer())

    class Specific(BaseType):
        __mapper_args__ = dict(polymorphic_identity='specific')

    eq_(BaseType.__table__.name, 'basetype')
    eq_(list(BaseType.__table__.c.keys()), ['type', 'id', 'value'])
    # Specific must reuse BaseType's table and its discriminator column.
    assert Specific.__table__ is BaseType.__table__
    assert class_mapper(Specific).polymorphic_on is BaseType.__table__.c.type
    eq_(class_mapper(Specific).polymorphic_identity, 'specific')
def models(self, **kwargs):
    """Models index page: map model names to their route URLs."""
    request = self.request
    models = {}
    if isinstance(self.model, list):
        for model in self.model:
            key = model.__name__
            models[key] = self.route_url(key, request.format)
    else:
        for key, obj in self.model.__dict__.iteritems():
            if key.startswith('_'):
                continue
            if Document is not None:
                # Bug fix: the original bare except also swallowed
                # SystemExit/KeyboardInterrupt; issubclass raises
                # TypeError for non-classes.
                try:
                    if issubclass(obj, Document):
                        models[key] = self.route_url(key, request.format)
                        continue
                except Exception:
                    pass
            try:
                class_mapper(obj)
            except Exception:
                # Not an SQLAlchemy-mapped class.
                continue
            if not isinstance(obj, type):
                continue
            models[key] = self.route_url(key, request.format)
    return self.render(models=models)
def setUp(test): placelesssetup.setUp() # Attempt to initialize mappers only if their not already mapped. try: orm.class_mapper(schema.Content) except orm.exc.UnmappedClassError: schema.initialize_mapper() component.provideAdapter(transform.StringTransform) component.provideAdapter(transform.IntegerTransform) component.provideAdapter(transform.FloatTransform) component.provideAdapter(transform.DateTimeTransform) component.provideAdapter(transform.LinesTransform) component.provideAdapter(transform.BooleanTransform) component.provideAdapter(transform.FileTransform) component.provideAdapter(transform.PhotoTransform) component.provideAdapter(transform.ReferenceTransform) component.provideUtility(peer.PeerRegistry()) component.provideAdapter( peer.PeerFactory, (interfaces.IMirrored, interfaces.ISchemaTransformer)) component.provideAdapter(transform.SchemaTransformer, (interfaces.IMirrored, interfaces.IMetaData)) component.provideAdapter(serializer.Serializer, (interfaces.IMirrored,)) component.provideAdapter( operation.OperationFactory, (interfaces.IMirrored,)) component.provideUtility(operation.OperationBufferFactory())
def reflect(cls):
    """Collect the plain scalar column attributes of *cls*.

    Skips the polymorphic discriminator, synonyms, dynamic/collection/
    scalar-object attributes and primary-key columns, returning only
    simple scalar attributes.
    """
    manager = manager_of_class(cls)
    pkeys = set(c.key for c in class_mapper(cls).primary_key)

    attrs = []
    for prop in class_mapper(cls).iterate_properties:
        attr = manager[prop.key]
        if getattr(prop, '_is_polymorphic_discriminator', False):
            continue
        if isinstance(prop, SynonymProperty):
            continue
        impl = attr.impl
        if isinstance(impl, (DynamicAttributeImpl,
                             CollectionAttributeImpl,
                             ScalarObjectAttributeImpl)):
            continue
        if attr.key in pkeys:
            continue
        if isinstance(impl, ScalarAttributeImpl):
            attrs.append(attr)
    return attrs
def test_polymorphic_option(self):
    """
    Test that polymorphic loading sets state.load_path with its actual
    mapper on a subclass, and not the superclass mapper.
    """
    paths = []

    class MyOption(interfaces.MapperOption):
        propagate_to_loaders = True

        def process_query_conditionally(self, query):
            # Record the load path each time the option is applied to a
            # lazy-load query.
            paths.append(query._current_path)

    sess = create_session()
    names = ['dilbert', 'pointy haired boss']
    dilbert, boss = (
        sess.query(Person)
        .options(MyOption())
        .filter(Person.name.in_(names))
        .order_by(Person.name).all())

    # Trigger lazy loads of subclass-specific relationships.
    dilbert.machines
    boss.paperwork

    # The recorded paths must carry the subclass mappers
    # (Engineer/Boss), not the Person superclass mapper.
    eq_(paths, [(class_mapper(Engineer), 'machines'),
                (class_mapper(Boss), 'paperwork')])
def on_after_update(mapper, connection, target):
    """SQLAlchemy after_update hook: reindex *target*, one-to-one
    relations that changed, and the item's parent documents in ES."""
    request = getattr(target, '_request', None)
    from .documents import BaseDocument

    # Reindex old one-to-one related object
    committed_state = attributes.instance_state(target).committed_state
    columns = set()
    for field, value in committed_state.items():
        if isinstance(value, BaseDocument):
            obj_session = object_session(value)
            # Make sure object is not updated yet
            if not obj_session.is_modified(value):
                obj_session.expire(value)
            index_object(value, with_refs=False, request=request)
        else:
            # A changed "<rel>_id" FK column implies the relation itself
            # changed; remember the relation name so it gets refreshed.
            id_pos = field.rfind('_id')
            if id_pos >= 0:
                rel_name = field[:id_pos]
                rel = mapper.relationships.get(rel_name, False)
                if rel and any(c.name == field for c in rel.local_columns):
                    columns.add(rel_name)

    # Reload `target` to get access to processed fields values
    columns = columns.union([c.name for c in class_mapper(target.__class__).columns])
    object_session(target).expire(target, attribute_names=columns)
    index_object(target, request=request, with_refs=False, nested_only=True)

    # Reindex the item's parents. This must be done after the child has
    # been processed.
    for parent, children_field in target.get_parent_documents(nested_only=True):
        columns = [c.name for c in class_mapper(parent.__class__).columns]
        object_session(parent).expire(parent, attribute_names=columns)
        ES(parent.__class__.__name__).index_nested_document(parent, children_field, target)
def add_model(self, model):
    """Register *model* after verifying it is an SQLAlchemy-mapped class.

    :raises ConfigError: if *model* is not mapped.
    """
    try:
        class_mapper(model)
    except Exception:
        # Bug fix: the error message contained a %s placeholder that was
        # never interpolated; also narrowed the bare except so
        # SystemExit/KeyboardInterrupt are not swallowed.
        raise ConfigError('%s is not a valid model.' % model)
    if model not in self.model_list:
        self.model_list.append(model)
def __init__(self, config=None):
    """Open the database and (idempotently) map each twitter table."""
    BaseDB.__init__(self, config=config)
    self.db = DBManager(config=config)
    self.session = self.db.session
    self.tweet_table = self.db.get_table(TWEETS_TABLE_NAME)
    self.user_table = self.db.get_table(USER_TABLE_NAME)
    self.user_meta_table = self.db.get_table(USER_META_TABLE_NAME)
    self.friends_table = self.db.get_table(FRIENDS_TABLE_NAME)
    self.followers_table = self.db.get_table(FOLLOWERS_TABLE_NAME)

    def map_once(klass, table):
        # Reuse an existing mapper when the class was already mapped
        # (e.g. by a previous instance); otherwise create one.
        try:
            return class_mapper(klass)
        except UnmappedClassError:
            return mapper(klass, table)

    self.tweet_mapper = map_once(TweetObj, self.tweet_table)
    self.user_mapper = map_once(userObj, self.user_table)
    self.user_meta_mapper = map_once(userMetaObj, self.user_meta_table)
    self.friend_mapper = map_once(friendObj, self.friends_table)
    self.follower_mapper = map_once(followerObj, self.followers_table)
def __init__(self, config=None):
    """Initialise on top of TweetsDB and map the Elect* classes."""
    TweetsDB.__init__(self, config=config)

    def map_once(klass, table):
        # Reuse an existing mapper if the class is already mapped,
        # otherwise create one against the given table.
        try:
            return class_mapper(klass)
        except UnmappedClassError:
            return mapper(klass, table)

    self.tweet_mapper = map_once(ElectTweetObj, self.tweet_table)
    self.user_mapper = map_once(ElectUserObj, self.user_table)
    self.user_meta_mapper = map_once(ElectUserMetaObj, self.user_meta_table)
    self.friend_mapper = map_once(ElectFriendObj, self.friends_table)
    self.follower_mapper = map_once(ElectFollowerObj, self.followers_table)
def _modify_params_for_relationships(self, entity, params, delete_first=True):
    """Replace relationship values in *params* with loaded DB objects.

    Values may already be mapped instances, primary-key scalars, or
    "pk1/pk2" composite-key strings; each is resolved to the target
    object(s).  Falsy values remove the relation key entirely.

    Bug fix: the loop previously reassigned the ``mapper`` local (the
    entity's own mapper) to the *target* class's mapper inside the
    ``prop.uselist`` scalar branch, corrupting ``get_property`` lookups
    for every subsequent relation; a distinct ``target_mapper`` name is
    used now.
    """
    entity_mapper = class_mapper(entity)
    relations = self.get_relations(entity)
    for relation in relations:
        if relation not in params:
            continue
        prop = entity_mapper.get_property(relation)
        target = prop.argument
        if inspect.isfunction(target):
            target = target()
        value = params[relation]
        if not value:
            del params[relation]
            continue
        if prop.uselist and isinstance(value, list):
            # Collection given as a list: resolve each entry.
            target_obj = []
            for v in value:
                try:
                    object_mapper(v)
                    target_obj.append(v)
                except UnmappedInstanceError:
                    if hasattr(target, 'primary_key'):
                        pk = target.primary_key
                    else:
                        pk = class_mapper(target).primary_key
                    if isinstance(v, basestring) and "/" in v:
                        # Composite key encoded as "pk1/pk2/...".
                        v = map(self._adapt_type, v.split("/"), pk)
                        v = tuple(v)
                    else:
                        v = self._adapt_type(v, pk[0])
                    # only add those items that come back
                    new_v = self.session.query(target).get(v)
                    if new_v is not None:
                        target_obj.append(new_v)
        elif prop.uselist:
            # Collection given as a single scalar/instance.
            try:
                object_mapper(value)
                target_obj = [value]
            except UnmappedInstanceError:
                target_mapper = target
                if not isinstance(target, Mapper):
                    target_mapper = class_mapper(target)
                if isinstance(target_mapper.primary_key[0].type, Integer):
                    value = int(value)
                target_obj = [self.session.query(target).get(value)]
        else:
            # Scalar reference.
            try:
                object_mapper(value)
                target_obj = value
            except UnmappedInstanceError:
                if isinstance(value, basestring) and "/" in value:
                    value = map(self._adapt_type, value.split("/"), prop.remote_side)
                    value = tuple(value)
                else:
                    value = self._adapt_type(value, prop.remote_side[0])
                target_obj = self.session.query(target).get(value)
        params[relation] = target_obj
    return params
def is_model_mapped(domain_model):
    """Return True when *domain_model* has an SQLAlchemy mapper."""
    try:
        # class_mapper raises UnmappedClassError for unmapped classes
        # (e.g. Address, Version).
        orm.class_mapper(domain_model)
    except orm.exc.UnmappedClassError:
        return False
    return True
def all_rels_including_subclasses(model_cls):
    """Return the relationship items of *model_cls* and all of its
    subclasses, with duplicates removed.

    Fix: ``Mapper.relationships.items()`` returns a view object on
    Python 3, which does not support ``+`` concatenation; wrapping it in
    ``list()`` keeps the code working on both Python 2 and 3.
    """
    own = list(class_mapper(model_cls).relationships.items())
    inherited = flatten(
        [list(class_mapper(subcls).relationships.items())
         for subcls in all_subclasses(model_cls)]
    )
    return remove_duplicates(own + inherited)
def test_insp_relationship_prop(self):
    # inspect() on a relationship-bound attribute returns the attribute
    # itself; its parent is the owning mapper and .mapper is the related
    # class's mapper.
    User = self.classes.User
    Address = self.classes.Address
    prop = inspect(User.addresses)
    is_(prop, User.addresses)
    is_(prop.parent, class_mapper(User))
    is_(prop._parentmapper, class_mapper(User))
    is_(prop.mapper, class_mapper(Address))
def __init__(self, cls, excludes=None, includes=None, nullables=None):
    """Introspect the mapped class *cls* and build lookup registries.

    :param cls: SQLAlchemy-mapped class to inspect.
    :param excludes: optional set of property names to exclude.
    :param includes: optional set of property names to include
        (mutually exclusive with *excludes*).
    :param nullables: optional mapping of nullable overrides.
    :raises ValueError: if both *includes* and *excludes* are given.
    :raises NotImplementedError: for unsupported mapper property types.
    """
    self.cls = cls
    self._mapper = class_mapper(cls)
    self.excludes = excludes or set()
    self.includes = includes or set()
    self.nullables = nullables or {}
    self.pkeys = [col.name for col in self._mapper.primary_key]
    self.fkeys = {}
    self.rkeys = {}
    self.attrs = {}
    self.fields = set()
    self.relationships = set()
    self.references = set()
    self.collections = set()
    if self.includes and self.excludes:
        raise ValueError("includes and excludes parameters are exclusive, specify only one of them")
    for p in self._mapper.iterate_properties:
        if isinstance(p, ColumnProperty):
            self.attrs[p.key] = p.columns[0]
            self.fields.add(p.key)
        elif isinstance(p, RelationshipProperty):
            # Resolve the target class: the argument may be a callable or
            # a mapper-like object exposing .class_.  (Renamed from `cls`
            # to stop shadowing the constructor parameter.)
            if callable(p.argument):
                target_cls = p.argument()
            else:
                target_cls = p.argument.class_
            self.attrs[p.key] = target_cls
            self.relationships.add(p.key)
            self.rkeys[p.key] = [col.name for col in class_mapper(target_cls).primary_key]
            if p.uselist:
                self.collections.add(p.key)
            else:
                self.references.add(p.key)
            # Record local FK column -> remote column pairs; drop the
            # entry entirely when the FKs live on the other table.
            self.fkeys[p.key] = OrderedDict()
            for col in p._calculated_foreign_keys:
                if col.table in self._mapper.tables:
                    for f in col.foreign_keys:
                        self.fkeys[p.key][col.name] = f.column.name
            if not self.fkeys[p.key]:
                self.fkeys.pop(p.key)
        else:
            # Bug fix: the exception was instantiated but never raised,
            # so unsupported property types were silently ignored.
            msg = "Unsupported property type: {}".format(type(p))
            raise NotImplementedError(msg)
    self._log = getLogger(__name__)
    self._log.debug("Registry created.")
    self._log.debug("Keys: %s", self.pkeys)
    self._log.debug("Foreign Keys: %s", self.fkeys)
    # Typo fix in log message ("Fieds" -> "Fields").
    self._log.debug("Fields: %s", self.fields)
    self._log.debug("Relationships: %s", self.relationships)
    self._log.debug("Relationships Keys: %s, %s", self.rkeys.keys(), self.rkeys.values())
    self._log.debug("References: %s", self.references)
    self._log.debug("Collections: %s", self.collections)
def relate(cls, propname, *args, **kwargs):
    """Produce a relationship between this mapped table and another one.

    This makes usage of SQLAlchemy's
    :func:`sqlalchemy.orm.relationship` construct.
    """
    # NOTE(review): relies on the private Mapper._configure_property API.
    rel = relationship(*args, **kwargs)
    class_mapper(cls)._configure_property(propname, rel)
def initialize(self, *args, **kwargs):
    """Collect every admin-registered model that is SQLAlchemy-mapped.

    Builds ``self.model_set`` mapping model class names to
    ``(model, model_admin)`` pairs; unmapped models are skipped.
    """
    self.model_set = {}
    for model_admin in site.model_admins:
        try:
            model = model_admin.model
            class_mapper(model)
        except Exception:
            # Bug fix: narrowed from a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt.  Unmapped models are skipped.
            continue
        self.model_set[model.__name__] = (model, model_admin)
def register_view(cls):
    """Create the URL routes for the view.

    Standard :class:`kit.util.View` implementation plus subview support.
    """
    super(View, cls).register_view()
    if cls.subviews:
        model = cls.__model__
        # All candidate subview keys: list-like relationships (filtered
        # by lazy strategy) plus association proxies.
        # NOTE(review): `.keys() + .keys()` concatenation is Python 2
        # only; on Python 3 these are views — confirm target version.
        all_keys = set(
            model._get_relationships(
                lazy=['dynamic', True, 'select'],
                uselist=True
            ).keys() + model._get_association_proxies().keys()
        )
        if cls.subviews == True:
            keys = all_keys
        else:
            keys = set(cls.subviews)
        # Reject unknown keys, then keep only the valid intersection.
        if keys - all_keys:
            raise ValueError('%s invalid for subviews' % (keys - all_keys, ))
        keys = all_keys & keys
        for key in keys:
            # Routes embed each primary-key column as a URL placeholder.
            collection_route = '/%s/%s/%s/' % (
                cls.base_url,
                '/'.join(
                    '<%s>' % k.name for k in class_mapper(model).primary_key
                ),
                key,
            )
            model_route = '/%s/%s/%s/<position>' % (
                cls.base_url,
                '/'.join(
                    '<%s>' % k.name for k in class_mapper(model).primary_key
                ),
                key
            )
            make_view(
                cls.__app__,
                view_class=_RelationshipView,
                view_name='%s_%s' % (cls.endpoint, key),
                __model__=model,
                __assoc_key__=key,
                parser=cls.parser,
                endpoint='%s_%s' % (cls.endpoint, key),
                methods=['GET', ],
                rules={
                    collection_route: ['GET', ],
                    model_route: ['GET', ],
                },
            )
def is_class_sa_mapped(klass):
    """Return True if *klass* (or the type of a given instance) is
    mapped by the SQLAlchemy ORM.

    Instances are normalized to their type before probing.
    """
    if not isinstance(klass, type):
        klass = type(klass)
    from sqlalchemy.orm.exc import UnmappedClassError
    try:
        class_mapper(klass)
    except (UnmappedClassError, UnmappedInstanceError):
        # Bug fix: class_mapper signals an unmapped *class* with
        # UnmappedClassError, which the original let propagate.
        return False
    return True
def _get_related_class(self, entity, relation):
    """Return the class on the far side of *relation* for *entity*."""
    source = resolve_entity(entity)
    prop = class_mapper(source).get_property(relation)
    target = resolve_entity(prop.argument)
    # The target may already be a mapper-like object; otherwise map it.
    if hasattr(target, 'class_'):
        return target.class_
    return class_mapper(target).class_
def _inspect_model_attributes(model):
    """Pair each mapped attribute key with its Column, as a dict."""
    model_mapper = class_mapper(model)
    attr_keys = [attr.key for attr in model_mapper.column_attrs]
    # NOTE(review): assumes column_attrs and mapper.columns iterate in
    # the same order, exactly as the original zip-based pairing did.
    return dict(zip(attr_keys, model_mapper.columns))
def process_relationships(klass, was_deferred=False):
    """Wire up the declared relations of *klass*, deferring until every
    related class/table is known.

    Uses the module-level registries ``__processed_classes__`` and
    ``__deferred_classes__`` to resolve circular dependencies by
    repeated passes.
    """
    # first, we loop through all of the relationships defined on the
    # class, and make sure that the related class already has been
    # completely processed and defer processing if it has not
    defer = False
    for propname, reldesc in klass.relations.items():
        found = (reldesc.classname == klass.__name__ or
                 reldesc.classname in __processed_classes__)
        if not found:
            defer = True
            break

    # next, we loop through all the columns looking for foreign keys
    # and make sure that we can find the related tables (they do not
    # have to be processed yet, just defined), and we defer if we are
    # not able to find any of the related tables
    if not defer:
        for col in klass.columns:
            if col.foreign_keys:
                found = False
                cn = col.foreign_keys[0]._colspec
                table_name = cn[:cn.rindex('.')]
                for other_klass in ActiveMapperMeta.classes.values():
                    if other_klass.table.fullname.lower() == table_name.lower():
                        found = True
                if not found:
                    defer = True
                    break

    if defer and not was_deferred:
        __deferred_classes__[klass.__name__] = klass

    # if we are able to find all related and referred to tables, then
    # we can go ahead and assign the relationships to the class
    if not defer:
        relations = {}
        for propname, reldesc in klass.relations.items():
            reldesc.process(klass, propname, relations)
        class_mapper(klass).add_properties(relations)
        if klass.__name__ in __deferred_classes__:
            del __deferred_classes__[klass.__name__]
        __processed_classes__[klass.__name__] = klass

    # finally, loop through the deferred classes and attempt to process
    # relationships for them
    if not was_deferred:
        # loop through the list of deferred classes, processing the
        # relationships, until we can make no more progress
        last_count = len(__deferred_classes__) + 1
        while last_count > len(__deferred_classes__):
            last_count = len(__deferred_classes__)
            deferred = __deferred_classes__.copy()
            for deferred_class in deferred.values():
                process_relationships(deferred_class, was_deferred=True)
def map_locations_table(self):
    """Maps locations table to Location class for Plain Weather application."""
    # Idiom fix: identity comparison with None (PEP 8) instead of ==.
    if self._locations_table is None:
        with closing(self._db_engine.connect()) as connection:
            metadata = MetaData(connection)
            # Presuming that locations table has already been created
            # explicitly via SQL; loaded here by SQLAlchemy reflection.
            self._locations_table = Table(self._locations_table_name, metadata, autoload=True)
    try:
        class_mapper(Location)
    except UnmappedClassError:
        # Map only once; later calls reuse the existing mapper.
        mapper(Location, self._locations_table)
def MapStatesTable(self):
    """Maps states table to States class for County application."""
    # Idiom fix: identity comparison with None (PEP 8) instead of ==.
    if self._states_table is None:
        with closing(self._db_engine.connect()) as connection:
            metadata = MetaData(connection)
            # Presuming that states table has already been created
            # explicitly via SQL; loaded here by SQLAlchemy reflection.
            self._states_table = Table(self._states_table_name, metadata, autoload=True)
    try:
        class_mapper(State)
    except UnmappedClassError:
        # Map only once; later calls reuse the existing mapper.
        mapper(State, self._states_table)
def test_rel_accessors(self):
    # The attribute proxies its RelationshipProperty: parent/mapper are
    # reachable both through .property and directly on the attribute.
    User = self.classes.User
    Address = self.classes.Address
    prop = inspect(User.addresses)
    is_(prop.property.parent, class_mapper(User))
    is_(prop.property.mapper, class_mapper(Address))
    is_(prop.parent, class_mapper(User))
    is_(prop.mapper, class_mapper(Address))
    # Relationship attributes expose an expression but no columns.
    assert not hasattr(prop, 'columns')
    assert hasattr(prop, 'expression')
def attribute_names(cls):
    """List the keys of all plain column properties mapped on *cls*."""
    names = []
    for prop in class_mapper(cls).iterate_properties:
        if isinstance(prop, ColumnProperty):
            names.append(prop.key)
    return names
def column_names(modelcls):
    """Return the column names of the mapped class *modelcls*.

    Bug fix: the parameter was ignored and the column list was always
    taken from the hard-coded ``WeblogEntry`` class.
    """
    return [c.name for c in class_mapper(modelcls).columns]
def test_mapper(self):
    # A serialized Mapper must deserialize to the identical mapper object.
    user_mapper = class_mapper(User)
    assert serializer.loads(serializer.dumps(user_mapper, -1), None, None) is user_mapper
def to_dict(self):
    # Serialize this mapped instance to {column_key: value} using the
    # mapper's column list.
    columns = [c.key for c in class_mapper(self.__class__).columns]
    return dict((c, getattr(self, c)) for c in columns)
'''
def serialize(model):
    """Serialize a mapped object into a plain column->value dict."""
    mapped_columns = class_mapper(model.__class__).columns
    return {col.key: getattr(model, col.key) for col in mapped_columns}
def get_table(self, model_class):
    """Return the Table the given mapped class is mapped to."""
    return orm.class_mapper(model_class).mapped_table
def addOrModify(self, table, params):
    '''
    Using the PrimaryKeys of the table, it looks for the record in the database.
    If it is there, it is updated, if not, it is inserted as a new entry.

    :param table: table where to add or modify
    :type table: str
    :param params: dictionary of what to add or modify
    :type params: dict

    :return: S_OK() || S_ERROR()
    '''
    session = self.sessionMaker_o()

    # Resolve the table class, preferring an extension module's version.
    found = False
    for ext in self.extensions:
        try:
            table_c = getattr(__import__(ext + __name__, globals(), locals(), [table]), table)
            found = True
            break
        except (ImportError, AttributeError):
            continue
    # If not found in extensions, import it from DIRAC base (this same module).
    if not found:
        table_c = getattr(__import__(__name__, globals(), locals(), [table]), table)

    primaryKeys = [key.name for key in class_mapper(table_c).primary_key]

    try:
        # Build the lookup from the primary-key values present in params.
        select = Query(table_c, session=session)
        for columnName, columnValue in params.iteritems():
            if not columnValue or columnName not in primaryKeys:
                continue
            column_a = getattr(table_c, columnName.lower())
            if isinstance(columnValue, (list, tuple)):
                select = select.filter(column_a.in_(list(columnValue)))
            elif isinstance(columnValue, six.string_types):
                select = select.filter(column_a == columnValue)
            else:
                self.log.error("type(columnValue) == %s" % type(columnValue))

        res = select.first()  # the selection is done via primaryKeys only
        if not res:  # if not there, let's insert it (and exit)
            return self.insert(table, params)

        # From now on, we assume we need to modify

        # Treating case of time value updates
        if not params.get('LastCheckTime'):
            params['LastCheckTime'] = None
        if not params.get('DateEffective'):
            params['DateEffective'] = None

        # Should we change DateEffective?
        changeDE = False
        if params.get('Status'):
            if params.get('Status') != res.status:
                # we update dateEffective iff we change the status
                changeDE = True

        for columnName, columnValue in params.iteritems():
            if columnName == 'LastCheckTime' and not columnValue:
                # we always update lastCheckTime
                columnValue = datetime.datetime.utcnow().replace(microsecond=0)
            if changeDE and columnName == 'DateEffective' and not columnValue:
                columnValue = datetime.datetime.utcnow().replace(microsecond=0)
            if columnValue:
                if isinstance(columnValue, datetime.datetime):
                    columnValue = columnValue.replace(microsecond=0)
                setattr(res, columnName.lower(), columnValue)
        session.commit()
        # and since we modified, we now insert a new line in the log table
        return self.insert(table.replace('Status', '') + 'Log', params)
        # The line inserted will maybe become a History line thanks to the
        # SummarizeLogsAgent
    except exc.SQLAlchemyError as e:
        session.rollback()
        self.log.exception("addOrModify: unexpected exception", lException=e)
        return S_ERROR("addOrModify: unexpected exception %s" % e)
    finally:
        session.close()
def serialize(model):
    """Transforms a model into a dictionary which can be dumped to JSON."""
    # Column keys come from the instance's mapper; values via getattr.
    keys = [column.key for column in class_mapper(model.__class__).columns]
    return {key: getattr(model, key) for key in keys}
def stringify(self):
    """Build a dotted identifier: table name followed by the PK values."""
    parts = [self.__table__.name]
    parts.extend(str(getattr(self, col.name))
                 for col in class_mapper(self.__class__).primary_key)
    return '.'.join(parts)
def SerializeliData(model, userid=None, dataprocessing=None, notreturn=[]):
    '''
    Single-object serializer (docstring translated from Chinese).

    Converts one mapped *model* into a dict, skipping column names listed
    in *notreturn* and appending derived fields (follow status, comment
    parent id, video tag list) depending on *userid*/*dataprocessing*.
    NOTE(review): the mutable default ``notreturn=[]`` is shared across
    calls — confirm no caller mutates it.
    '''
    li = []
    dicts = {}
    from sqlalchemy.orm import class_mapper
    columns = [c.key for c in class_mapper(model.__class__).columns]
    for c in columns:
        print(c)
        if str(c) not in notreturn:
            try:
                # SECURITY NOTE(review): eval() on stored column values can
                # execute arbitrary code if the data is untrusted —
                # consider ast.literal_eval or json.loads instead.
                cc = eval(getattr(model, c))
                if isinstance(cc, dict):
                    li.append((c, cc))
                if isinstance(cc, list):
                    li.append((c, cc))
            except:
                # Not an evaluable container: fall back to plain value
                # handling with date/datetime formatting and ''-for-None.
                if type(getattr(model, c)) == datetime.datetime:
                    li.append((c, DateTimeForStr(getattr(model, c))))
                elif type(getattr(model, c)) == datetime.date:
                    li.append((c, DateForStr(getattr(model, c))))
                else:
                    if getattr(model, c) == '' or getattr(model, c) == None:
                        li.append((c, ''))
                    else:
                        li.append((c, getattr(model, c)))
            # Derived/augmented fields --------------------------------
            if c == 'author_userid' and userid != None:
                li.append(('follow_status', GetFollowType(userid, getattr(model, c))))
            if c == 'parent_comment_id':
                # Top-level comments (seedtype == 1) point at themselves.
                if int(getattr(model, 'seedtype')) == 1:
                    li.append(('parent_comment_id', getattr(model, 'id')))
            if dataprocessing == 'queryvideo':
                videotag = VideoTag.query.filter(VideoTag.videoid == getattr(model, 'id')).all()
                dataprocessing_taglist = [{
                    'tagid': i.tagid,
                    'tagname': VideoTagName.query.filter_by(id=i.tagid).first().tagname
                } for i in videotag]
                li.append(('taglist', dataprocessing_taglist))
            else:
                pass
    for s in li:
        dicts.update(dict([s]))
    return dicts
def get_column_metadata(class_, colname):
    """Look up metadata for column *colname* on the mapped *class_*.

    :raises ValueError: when *colname* does not name a column property.
    """
    prop = class_mapper(class_).get_property(colname)
    metadata = _get_column_metadata(prop)
    if metadata is None:
        raise ValueError("Not a column name: %r." % (colname, ))
    return metadata
def model_to_dict(model):
    """Map each column name of *model* to its attribute value (or None)."""
    mapped = class_mapper(model.__class__)
    return {column.name: getattr(model, key, None)
            for key, column in mapped.c.items()}
def retrieve_or_create(cls, session, keys, create=True, update=True):
    """Return the database object corresponding to *keys*, creating or
    updating it according to the *create*/*update* flags.

    NOTE: *keys* is mutated in place (non-column entries, 'id' and
    link-key entries are removed).
    """
    logger.debug('initial value of keys: %s' % keys)
    ## first try retrieving
    is_in_session = cls.retrieve(session, keys)
    logger.debug('2 value of keys: %s' % keys)
    if not create and not is_in_session:
        logger.debug('returning None (1)')
        return None
    if is_in_session and not update:
        logger.debug("returning not updated existing %s" % is_in_session)
        return is_in_session
    try:
        ## some fields are given as text but actually correspond to
        ## different fields and should be associated to objects
        extradict = cls.compute_serializable_fields(session, keys)
        ## what fields must be corrected
        cls.correct_field_names(keys)
    except error.NoResultException:
        if not is_in_session:
            logger.debug("returning None (2)")
            return None
        else:
            extradict = {}
    logger.debug('3 value of keys: %s' % keys)
    ## at this point, resulting object is either in database or not. in
    ## either case, the database is going to be updated.

    ## link_keys are python-side properties, not database associations
    ## and have as value objects that are possibly in the database, or
    ## not, but they cannot be used to construct the `self` object.
    link_values = {}
    for k in cls.link_keys:
        if keys.get(k):
            link_values[k] = keys[k]

    # Strip everything that is not a mapped column (plus 'id'), then
    # merge in the computed fields.
    for k in keys.keys():
        if k not in class_mapper(cls).mapped_table.c:
            del keys[k]
    if 'id' in keys:
        del keys['id']
    keys.update(extradict)

    ## completing the task of building the links
    logger.debug("links? %s, %s" % (cls.link_keys, keys.keys()))
    for key in cls.link_keys:
        d = link_values.get(key)
        if d is None:
            continue
        logger.debug('recursive call to construct_from_dict %s' % d)
        obj = construct_from_dict(session, d)
        keys[key] = obj

    if is_in_session and update:
        # Update path: copy every non-None value onto the existing row.
        result = is_in_session
        logger.debug("going to update %s with %s" % (result, keys))
        if 'id' in keys:
            del keys['id']
        for k, v in keys.items():
            if v is not None:
                setattr(result, k, v)
        logger.debug('returning updated existing %s' % result)
        return result

    # Create path: build, add and flush a brand-new object.
    result = cls(**keys)
    session.add(result)
    session.flush()
    logger.debug('returning new %s' % result)
    return result
def create_object_version(mapper_fn, base_object, rev_table):
    '''Create the Version Domain Object corresponding to base_object.

    E.g. if Package is our original object we should do::

        # name of Version Domain Object class
        PackageVersion = create_object_version(..., Package, ...)

    NB: This must obviously be called after mapping has happened to
    base_object.
    '''
    # TODO: can we always assume all versioned objects are stateful?
    # If not need to do an explicit check
    class MyClass(StatefulObjectMixin, SQLAlchemyMixin):
        pass

    name = base_object.__name__ + 'Revision'
    MyClass.__name__ = name
    MyClass.__continuity_class__ = base_object
    # Must add this so base object can retrieve revisions ...
    base_object.__revision_class__ = MyClass

    ourmapper = mapper_fn(
        MyClass, rev_table,
        properties={
            # NB: call it all_revisions_... rather than just revisions_... as it
            # will yield all revisions not just those less than the current
            # revision
            'continuity': relation(base_object,
                                   backref=backref('all_revisions_unordered',
                                                   cascade='all, delete, delete-orphan'),
                                   order_by=rev_table.c.revision_id.desc()),
            # 'continuity':relation(base_object),
        },
        order_by=[rev_table.c.continuity_id, rev_table.c.revision_id.desc()])
    base_mapper = class_mapper(base_object)
    # add in 'relationship' stuff from continuity onto revisioned obj
    # 3 types of relationship
    # 1. scalar (i.e. simple fk)
    # 2. list (has many) (simple fk the other way)
    # 3. list (m2m) (join table)
    #
    # Also need to check whether related object is revisioned
    #
    # If related object is revisioned then can do all of these
    # If not revisioned can only support simple relation (first case -- why?)
    for prop in base_mapper.iterate_properties:
        try:
            is_relation = prop.__class__ == sqlalchemy.orm.properties.PropertyLoader
        except AttributeError:
            # SQLAlchemy 0.9
            is_relation = prop.__class__ == sqlalchemy.orm.properties.RelationshipProperty
        if is_relation:
            # in sqlachemy 0.4.2
            # prop_remote_obj = prop.select_mapper.class_
            # in 0.4.5
            prop_remote_obj = prop.argument
            remote_obj_is_revisioned = getattr(prop_remote_obj, '__revisioned__', False)
            # this is crude, probably need something better
            is_many = (prop.secondary != None or prop.uselist)
            if remote_obj_is_revisioned:
                propname = prop.key
                add_fake_relation(MyClass, propname, is_many=is_many)
            elif not is_many:
                ourmapper.add_property(prop.key, relation(prop_remote_obj))
            else:
                # TODO: actually deal with this
                # raise a warning of some kind
                msg = 'Skipping adding property %s to revisioned object' % prop
                # Issue #3 not considered for over two years, so removing this
                # annoying log message.
                # This doesn't seem to be a problem for ckan.
                #logger.info(msg)
    return MyClass
def modify_base_object_mapper(base_object, revision_obj, state_obj):
    """Attach a 'revision' relation from the base object to its
    revision class."""
    revision_relation = relation(revision_obj)
    class_mapper(base_object).add_property('revision', revision_relation)
def test_merge_w_relationship(self):
    # Concrete polymorphic union of A/B/C; each subclass defines its own
    # 'some_dest' relationship, back-populated from Dest.many_a.
    A, C, B, c_table, b_table, a_table, Dest, dest_table = (
        self.classes.A,
        self.classes.C,
        self.classes.B,
        self.tables.c_table,
        self.tables.b_table,
        self.tables.a_table,
        self.classes.Dest,
        self.tables.dest_table,
    )
    ajoin = polymorphic_union({
        "a": a_table,
        "b": b_table,
        "c": c_table
    }, "type", "ajoin")
    mapper(
        A, a_table,
        with_polymorphic=("*", ajoin),
        polymorphic_on=ajoin.c.type,
        polymorphic_identity="a",
        properties={
            "some_dest": relationship(Dest, back_populates="many_a")
        },
    )
    mapper(
        B, b_table,
        inherits=A,
        concrete=True,
        polymorphic_identity="b",
        properties={
            "some_dest": relationship(Dest, back_populates="many_a")
        },
    )
    mapper(
        C, c_table,
        inherits=A,
        concrete=True,
        polymorphic_identity="c",
        properties={
            "some_dest": relationship(Dest, back_populates="many_a")
        },
    )
    mapper(
        Dest, dest_table,
        properties={
            "many_a": relationship(A, back_populates="some_dest", order_by=ajoin.c.id)
        },
    )
    # Each concrete subclass owns its own relationship property.
    assert C.some_dest.property.parent is class_mapper(C)
    assert B.some_dest.property.parent is class_mapper(B)
    assert A.some_dest.property.parent is class_mapper(A)

    sess = sessionmaker()()
    dest1 = Dest(name="d1")
    dest2 = Dest(name="d2")
    a1 = A(some_dest=dest2, aname="a1")
    b1 = B(some_dest=dest1, bname="b1")
    c1 = C(some_dest=dest2, cname="c1")
    sess.add_all([dest1, dest2, c1, a1, b1])
    sess.commit()

    # Merging into a fresh session must preserve the relationship and FK.
    sess2 = sessionmaker()()
    merged_c1 = sess2.merge(c1)
    eq_(merged_c1.some_dest.name, "d2")
    eq_(merged_c1.some_dest_id, c1.some_dest_id)
class ExternalReport(DeclarativeMappedObject):
    # Link to an external reporting system: a unique name, its URL, and an
    # optional free-text description.
    __tablename__ = 'external_reports'
    __table_args__ = {'mysql_engine': 'InnoDB'}
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(100), unique=True, nullable=False)
    url = Column(Unicode(10000), nullable=False)
    description = Column(Unicode(1000), default=None)

    def __init__(self, *args, **kw):
        # Plain pass-through constructor; all column values come via kwargs.
        super(ExternalReport, self).__init__(*args, **kw)


# Delayed property definitions due to circular dependencies
class_mapper(LabController).add_property('dyn_systems', dynamic_loader(System))
class_mapper(System).add_properties({
    # The relationship to 'recipe' is complicated
    # by the polymorphism of SystemResource :-(
    'recipes': relationship(
        Recipe, viewonly=True,
        secondary=RecipeResource.__table__.join(SystemResource.__table__),
        secondaryjoin=and_(SystemResource.__table__.c.id == RecipeResource.id,
                           RecipeResource.recipe_id == Recipe.id)),
    'dyn_recipes': dynamic_loader(
        Recipe,
        secondary=RecipeResource.__table__.join(SystemResource.__table__),
        # NOTE(review): this statement is truncated at the chunk boundary;
        # the remainder of the secondaryjoin continues outside this view.
        secondaryjoin=and_(SystemResource.__table__.c.id == RecipeResource.id,
def create_object_version(mapper_fn, base_object, rev_table):
    '''Create the Version Domain Object corresponding to base_object.

    E.g. if Package is our original object we should do::

        # name of Version Domain Object class
        PackageVersion = create_object_version(..., Package, ...)

    NB: This must obviously be called after mapping has happened to
    base_object.

    :param mapper_fn: the mapper function to use (e.g. sqlalchemy's mapper).
    :param base_object: the already-mapped continuity class.
    :param rev_table: the table holding the revisions of base_object.
    :return: the generated revision class (also stored on
        ``base_object.__revision_class__``).
    '''
    # TODO: can we always assume all versioned objects are stateful?
    # If not need to do an explicit check
    class MyClass(object):
        def __init__(self, **kw):
            for k, v in kw.iteritems():
                setattr(self, k, v)

    name = base_object.__name__ + u'Revision'
    MyClass.__name__ = str(name)
    MyClass.__continuity_class__ = base_object
    # Must add this so base object can retrieve revisions ...
    base_object.__revision_class__ = MyClass
    ourmapper = mapper_fn(
        MyClass, rev_table,
        # NB: call it all_revisions_... rather than just revisions_... as it
        # will yield all revisions not just those less than the current
        # revision
        # ---------------------
        # Deviate from VDM here
        #
        # properties={
        # 'continuity':relation(base_object,
        #     backref=backref('all_revisions_unordered',
        #         cascade='all, delete, delete-orphan'),
        #     order_by=rev_table.c.revision_id.desc()
        #     ),
        # },
        # order_by=[rev_table.c.continuity_id, rev_table.c.revision_id.desc()]
        # ---------------------
    )
    base_mapper = class_mapper(base_object)
    # add in 'relationship' stuff from continuity onto revisioned obj
    # 3 types of relationship
    # 1. scalar (i.e. simple fk)
    # 2. list (has many) (simple fk the other way)
    # 3. list (m2m) (join table)
    #
    # Also need to check whether related object is revisioned
    #
    # If related object is revisioned then can do all of these
    # If not revisioned can only support simple relation (first case -- why?)
    for prop in base_mapper.iterate_properties:
        try:
            is_relation = prop.__class__ == \
                sqlalchemy.orm.properties.PropertyLoader
        except AttributeError:
            # SQLAlchemy 0.9: PropertyLoader was renamed
            is_relation = prop.__class__ == \
                sqlalchemy.orm.properties.RelationshipProperty
        if is_relation:
            # in sqlachemy 0.4.2
            # prop_remote_obj = prop.select_mapper.class_
            # in 0.4.5
            prop_remote_obj = prop.argument
            remote_obj_is_revisioned = \
                getattr(prop_remote_obj, u'__revisioned__', False)
            # this is crude, probably need something better
            is_many = (prop.secondary is not None or prop.uselist)
            if remote_obj_is_revisioned:
                propname = prop.key
                add_fake_relation(MyClass, propname, is_many=is_many)
            elif not is_many:
                ourmapper.add_property(prop.key, relation(prop_remote_obj))
            else:
                # TODO: actually deal with this -- many-valued relations to
                # unrevisioned objects are silently skipped.  (A dead, unused
                # 'msg' string previously built here has been removed.)
                pass
    return MyClass
def mapping_exists(model):
    """Return True when *model* has a configured SQLAlchemy mapper."""
    try:
        orm.class_mapper(model)
    except orm_exc.UnmappedClassError:
        return False
    return True
def to_dict(self, *args, prefix=''):
    """Serialize the requested columns/relationships of this model to a dict.

    Each positional argument is a comma-separated spec; dots descend into
    relationships (``'a.b'``), ``|`` groups alternatives at one level,
    ``'*'`` requests all columns (debug only), and ``'~'`` requests the
    related object's standard client-side fields.

    :param prefix: dotted path accumulated during recursion, used only to
        build readable error messages.
    :raises ValueError: when a requested attribute or relation does not
        exist on the mapped class.
    """
    # TODO: OZ by OZ: this function is wrong. we need walk through requested
    # fields and return appropriate attribute.
    # Now we walk through attribute (yes?)
    ret = {}
    __debug = True
    req_columns = {}        # column-name -> True for plainly requested columns
    req_relationships = {}  # relation-name -> list of sub-specs to recurse with

    def add_to_req_relationships(column_name, columns):
        # Accumulate sub-specs per relation name (a relation can be
        # mentioned by several argument strings).
        if column_name not in req_relationships:
            req_relationships[column_name] = []
        req_relationships[column_name].append(columns)

    def get_next_level(child, nextlevelargs, prefix, standard_fields_required):
        # Recurse into a related object (or filter a plain dict child).
        in_next_level_dict = {k: v for k, v in child.items() if k in nextlevelargs} if \
            isinstance(child, dict) else child.to_dict(*nextlevelargs, prefix=prefix)
        if standard_fields_required:
            # '~' was requested: merge in the child's standard fields.
            in_next_level_dict.update(
                child if isinstance(child, dict) else child.get_client_side_dict())
        return in_next_level_dict

    # Parse the request strings into req_columns / req_relationships.
    for arguments in args:
        if arguments:
            for argument in re.compile('\s*,\s*').split(arguments):
                columnsdevided = argument.split('.')
                column_names = columnsdevided.pop(0)
                for column_name in column_names.split('|'):
                    if len(columnsdevided) == 0:
                        req_columns[column_name] = True
                    else:
                        add_to_req_relationships(column_name,
                                                 '.'.join(columnsdevided))

    columns = class_mapper(self.__class__).columns
    relations = {
        a: b
        for (a, b) in class_mapper(self.__class__).relationships.items()
    }
    # Association proxies are treated as relations too.
    for a, b in class_mapper(self.__class__).all_orm_descriptors.items():
        if isinstance(b, AssociationProxy):
            relations[a] = b
    # association_proxies = {a: b for (a, b) in class_mapper(self.__class__).all_orm_descriptors.items()
    #                        if isinstance(b, AssociationProxy)}
    pass

    # Emit requested (or, in debug, all) mapped columns.
    for col in columns:
        if col.key in req_columns or (__debug and '*' in req_columns):
            ret[col.key] = self.to_dict_object_property(col.key)
            if col.key in req_columns:
                del req_columns[col.key]
    if '*' in req_columns and __debug:
        del req_columns['*']

    # Plain (non-relation) attributes satisfied directly off the instance.
    del_req_columns_in_attrs = []
    for colname in req_columns:
        if hasattr(self, colname) and colname not in relations:
            del_req_columns_in_attrs.append(colname)
            ret[colname] = getattr(self, colname)
    for colname in del_req_columns_in_attrs:
        del req_columns[colname]

    if len(req_columns) > 0:
        columns_not_in_relations = list(
            set(req_columns.keys()) - set(relations.keys()))
        if len(columns_not_in_relations) > 0:
            raise ValueError(
                "you requested not existing attribute(s) `%s%s`" % (
                    prefix,
                    '`, `'.join(columns_not_in_relations),
                ))
        else:
            # Leftover names that ARE relations: request their standard fields.
            for rel_name in req_columns:
                add_to_req_relationships(rel_name, '~')
            # raise ValueError("you requested for attribute(s) but "
            #                  "relationships found `%s%s`" % (
            #                      prefix, '`, `'.join(set(relations.keys()).
            #                                          intersection(
            #                                              req_columns.keys())),))

    # Recurse into requested relationships.
    for relationname, relation in relations.items():
        # For association proxies, resolve to the underlying relationship.
        rltn = relations[relation.target_collection] if isinstance(
            relation, AssociationProxy) else relation
        if relationname in req_relationships or (__debug and '*' in req_relationships):
            if relationname in req_relationships:
                nextlevelargs = req_relationships[relationname]
                del req_relationships[relationname]
            else:
                nextlevelargs = req_relationships['*']
            related_obj = getattr(self, relationname)
            standard_fields_required = False
            # Strip every '~' marker; its presence requests standard fields.
            while '~' in nextlevelargs:
                standard_fields_required = True
                nextlevelargs.remove('~')
            if rltn.uselist:
                add = [
                    get_next_level(child, nextlevelargs,
                                   prefix + relationname + '.',
                                   standard_fields_required)
                    for child in related_obj
                ]
            else:
                add = None if related_obj is None else \
                    get_next_level(related_obj, nextlevelargs,
                                   prefix + relationname + '.',
                                   standard_fields_required)
            ret[relationname] = add
    if '*' in req_relationships:
        del req_relationships['*']

    # Remaining requested names satisfied as plain attributes, JSON-filtered.
    del_req_columns_in_attrs = []
    for relname, nextlevelargs in req_relationships.items():
        if hasattr(self, relname):
            del_req_columns_in_attrs.append(relname)
            add = utils.filter_json(
                getattr(self, relname),
                *nextlevelargs) if nextlevelargs else getattr(self, relname)
            ret[relname] = utils.dict_merge_recursive(
                ret[relname] if relname in ret else {}, add)
    for colname in del_req_columns_in_attrs:
        del req_relationships[colname]

    if len(req_relationships) > 0:
        relations_not_in_columns = list(
            set(req_relationships.keys()) - set(columns))
        if len(relations_not_in_columns) > 0:
            raise ValueError(
                "you requested not existing relation(s) `%s%s`" % (
                    prefix,
                    '`, `'.join(relations_not_in_columns),
                ))
        else:
            raise ValueError(
                "you requested for relation(s) but "
                "column(s) found `%s%s`" % (
                    prefix,
                    '`, `'.join(set(columns).intersection(req_relationships)),
                ))
    return ret
def main():
    """Rewrite each table's CSV dump into the new multi-file layout.

    For every mapped table: build a FieldSpec per output column, split
    translatable columns out into auxiliary (text/prose) CSVs, fold any
    existing ``*_names.csv`` foreign-name file into the aux table, then
    write all output files with sorted rows.  Python 2 script (print
    statements, ``csv.next()``).
    """
    for table in sorted(tables.all_tables(), key=lambda t: t.__name__):
        datafilename = dir + '/' + table.__tablename__ + '.csv'
        classname = table.__name__
        if hasattr(table, 'object_table'):
            # This is an auxilliary table; it'll be processed with the main one
            continue
        else:
            print "%s: %s" % (classname, table.__tablename__)
        with open(datafilename) as datafile:
            datacsv = csv.reader(datafile, lineterminator='\n')
            orig_fields = datacsv.next()
            columns = class_mapper(table).c
            new_fields = []
            main_out = []
            outputs = {datafilename: main_out}
            name_out = None
            srcfiles = [datafilename]
            # Set new_fields to a list of FieldSpec object, one for each
            # field we want in the csv
            for column in columns:
                name = column.name
                if name == 'identifier':
                    new_fields.append(
                        FieldSpec(datafilename, column.name,
                                  MakeFieldFuncs.ident))
                elif name in orig_fields:
                    new_fields.append(
                        FieldSpec(datafilename, column.name,
                                  MakeFieldFuncs.copy))
                elif name == 'id':
                    new_fields.append(
                        FieldSpec(datafilename, column.name,
                                  MakeFieldFuncs.newid))
                elif name == 'language_id':
                    # language_id goes right after the id columns
                    new_fields.insert(
                        2,
                        FieldSpec(datafilename, column.name,
                                  MakeFieldFuncs.en))
                else:
                    raise AssertionError(name)
            # Remember headers
            headers = {datafilename: list(field.name for field in new_fields)}
            # Pretty prnt :)
            for field in new_fields:
                print ' [{0.func.func_name:5}] {0.name}'.format(field)
            # Do pretty much the same for aux tables
            aux_tables = []
            for attrname in 'text_table prose_table'.split():
                aux_table = getattr(table, attrname, None)
                if aux_table:
                    aux_datafilename = dir + '/' + aux_table.__tablename__ + '.csv'
                    print " %s: %s" % (aux_table.__name__,
                                       aux_table.__tablename__)
                    srcfiles.append(datafilename)
                    aux_tables.append(aux_table)
                    columns = class_mapper(aux_table).c
                    aux_out = []
                    outputs[aux_datafilename] = aux_out
                    aux_fields = []
                    for column in columns:
                        name = column.name
                        if name == 'language_id':
                            aux_fields.insert(
                                1,
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.en))
                        elif name == 'name' and table.__name__ == 'ItemFlag':
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.f_id))
                        elif name == 'description' and table.__name__ == 'ItemFlag':
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.name))
                        elif name in orig_fields and name == 'name' and \
                                table.__name__ in 'PokemonColor ContestType BerryFirmness'.split():
                            # Capitalize these names
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.Name))
                        elif name in orig_fields and name in 'color flavor'.split() \
                                and table.__name__ == 'ContestType':
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.Main))
                        elif name in orig_fields:
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.main))
                        elif name == table.__singlename__ + '_id':
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.srcid))
                        elif name == 'name':
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.Name))
                        elif name == 'lang_id':
                            aux_fields.append(
                                FieldSpec(aux_datafilename, column.name,
                                          MakeFieldFuncs.srcid))
                        else:
                            print orig_fields
                            raise AssertionError(name)
                        if name == 'name':
                            # If this table contains the name, remember that
                            name_fields = aux_fields
                            name_out = aux_out

                    # Sort aux tables nicely: id first, then language, name,
                    # then everything else.
                    def key(f):
                        if f.func == MakeFieldFuncs.srcid:
                            return 0
                        elif f.name == 'language_id':
                            return 1
                        elif f.name == 'name':
                            return 2
                        else:
                            return 10
                    aux_fields.sort(key=key)
                    new_fields += aux_fields
                    headers[aux_datafilename] = list(
                        field.name for field in aux_fields)
                    # Pretty print :)
                    for field in aux_fields:
                        print ' [{0.func.func_name:5}] {0.name}'.format(
                            field)
            # Do nothing if the table's the same
            if all(field.func == MakeFieldFuncs.copy for field in new_fields):
                print u' → skipping'
                continue
            # Otherwise read the file
            # outputs will be a (filename -> list of rows) dict
            print u' → reading'
            for autoincrement_id, src_row in enumerate(datacsv, start=1):
                row = dict(zip(orig_fields, src_row))
                new_rows = defaultdict(list)
                for field in new_fields:
                    new_rows[field.out].append(
                        field.func(
                            source=row,
                            field_name=field.name,
                            i=autoincrement_id,
                        ))
                for name, row in new_rows.items():
                    outputs[name].append(row)
            # If there was a _names table, read that and append it to the
            # aux table that has names
            try:
                name_datafilename = dir + '/' + table.__singlename__ + '_names.csv'
                name_file = open(name_datafilename)
            except (AttributeError, IOError):
                # no __singlename__ or no such file: nothing to fold in
                pass
            else:
                print u' → reading foreign names'
                with name_file:
                    namecsv = csv.reader(name_file, lineterminator='\n')
                    src_fields = namecsv.next()
                    obj_id_fieldname = table.__singlename__ + '_id'
                    assert src_fields == [obj_id_fieldname, 'language_id',
                                          'name']
                    for name_row in namecsv:
                        name_dict = dict(zip(src_fields, name_row))
                        row = []
                        for field in name_fields:
                            row.append(name_dict.get(field.name, ''))
                        name_out.append(row)
                # the _names file is consumed; delete it
                os.unlink(name_datafilename)
            # For all out files, write a header & sorted rows
            print u' → writing'
            for filename, rows in outputs.items():
                with open(filename, 'w') as outfile:
                    outcsv = csv.writer(outfile, lineterminator='\n')
                    outcsv.writerow(headers[filename])
                    rows.sort(key=tuple_key)
                    for row in rows:
                        outcsv.writerow(row)
def mappers(*args):
    """Return the configured mapper for each mapped class given."""
    result = []
    for mapped_class in args:
        result.append(class_mapper(mapped_class))
    return result
def mappers(*args):
    """Return the configured mapper for each mapped class given.

    Imports locally so the module has no hard top-level SQLAlchemy
    dependency.
    """
    from sqlalchemy.orm import class_mapper
    result = []
    for mapped_class in args:
        result.append(class_mapper(mapped_class))
    return result
def catalyst(
        _context,
        class_,
        descriptor,
        #view_module=None, # !+?
        interface_module=bungeni.models.interfaces,
        #container_module=None, # !+?
        ui_module=bungeni.ui.content,
        echo=False):
    """Catalyse a mapped domain class: generate its interface, apply
    security, and build container/traversal machinery.

    :param _context: the ZCML configuration context (logged only here).
    :param class_: the SQLAlchemy-mapped domain class.
    :param descriptor: UI/field descriptor for the class.
    :param echo: when True, enable one-time DEBUG logging for this module.
    """
    from alchemist.catalyst.zcml import CatalystContext #!+ALCHEMIST_INTERNAL
    ctx = CatalystContext()
    ctx.descriptor = descriptor
    ctx.domain_model = class_
    ctx.mapper = orm.class_mapper(class_)
    ctx.interface_module = interface_module
    #ctx.mapper =
    ctx.container_module = None # !+container_module(mr, jul-2011), expected
    # to be defined by alchemist.catalyst.container.GenerateContainer
    ctx.ui_module = ui_module
    ctx.echo = echo
    ctx.views = {} # keyed by view type (add|edit)
    ctx.relation_viewlets = {} # keyed by relation name
    ctx.logger = log
    ctx.logger.debug("context=%s, class=%s, descriptor=%s, echo=%s" % (
        _context, class_, descriptor, echo))
    global logging_setup # !+? module-level flag: configure logging only once
    if ctx.echo and not logging_setup:
        logging_setup = True
        logging.basicConfig()
        formatter = logging.Formatter("ALCHEMIST: %(module)s -> %(message)s")
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        console.setFormatter(formatter)
        ctx.logger.addHandler(console)
        ctx.logger.setLevel(logging.DEBUG)
        #console.propagate = False
        ctx.logger.propagate = False
    try:
        # create a domain interface if it doesn't already exist
        # this also creates an adapter between the interface and desc.
        GenerateDomainInterface(ctx)
        from alchemist.catalyst.domain import ApplySecurity #!+ALCHEMIST_INTERNAL
        ApplySecurity(ctx)
        # behavior.ApplyIndexing()
        # behavior.ApplyWorkflows()
        # behavior.ApplyVersioning()
        # create a container class
        GenerateContainer(ctx)
        # generate collection traversal
        GenerateCollectionTraversal(ctx)
    except:
        # Deliberate catch-all debugging aid: print the traceback, drop
        # into pdb post-mortem, then re-raise.
        import sys, traceback, pdb
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[-1])
        raise
def _get_keys(cls):
    """Return the column names mapped for this model."""
    mapper = class_mapper(cls)
    return mapper.c.keys()
def GenerateDomainInterface(ctx, interface_name=None):
    """Generate (or reuse) the zope interface for ctx.domain_model and
    re-declare the class's implemented interfaces accordingly.

    Stores the result on ``ctx.interface_module`` and
    ``ctx.domain_interface``.
    """
    # when called from zcml, most likely we'll get a class not an instance
    # if it is a class go ahead and call instantiate it
    if isinstance(ctx.descriptor, type):
        ctx.descriptor = ctx.descriptor()
    # if the interface module is none, then use the nearest one to the
    # domain class
    if ctx.interface_module is None:
        ctx.interface_module = _get_interface_module_for(ctx)
    # interface for domain model
    if not interface_name:
        interface_name = "I%s" % (ctx.domain_model.__name__)
    if ctx.echo:
        ctx.logger.debug("%s: generated interface %s.%s " % (
            ctx.domain_model.__name__, ctx.interface_module.__name__,
            interface_name))
    from alchemist.catalyst.domain import getDomainInterfaces #!+ALCHEMIST_INTERNAL
    bases, implements = getDomainInterfaces(ctx.domain_model)
    # use the class"s mapper select table as input for the transformation
    domain_mapper = orm.class_mapper(ctx.domain_model)
    ## 0.4 and 0.5 compatibility, 0.5 has the table as local_table
    ## (select_table) is none lazy gen?
    #domain_table = getattr(domain_mapper, "local_table", domain_mapper.select_table)
    # SQLAlchemy 0.6 has no select_table attribute; we still keep 0.4
    # compatibility though.
    domain_table = (domain_mapper.local_table if sa_version[1] >= 5
        else domain_mapper.select_table)
    # if the domain model already implements a model interface, use it
    # instead of generating a new one
    for iface in interface.implementedBy(ctx.domain_model):
        if (IIModelInterface.providedBy(iface) and
                iface.__name__ == interface_name):
            domain_interface = iface
            break
    else:
        domain_interface = sa2zs.transmute(
            domain_table,
            annotation=ctx.descriptor,
            interface_name=interface_name,
            __module__=ctx.interface_module.__name__,
            bases=bases)
    # if we're replacing an existing interface, make sure the new
    # interface implements it
    old = getattr(ctx.interface_module, interface_name, None)
    if old is not None:
        implements.append(old)
    implements.insert(0, domain_interface)
    # ensure interfaces are unique, preserving the order
    #implements = [ ifc for i,ifc in enumerate(implements)
    #               if implements.index(ifc)==i ]
    #
    # XXX: Oooh, strangely the above does not work... it turns out that
    # implements contains seemingly repeated interfaces e.g. the first and last
    # interfaces are both "<InterfaceClass bungeni.models.interfaces.IReport>"
    # but, they are not the same! So, to compare unique we use the string
    # representation of each interface:
    # str_implements = map(str, implements)
    # implements = [ ifc for i,ifc in enumerate(implements)
    #                if str_implements.index(str(ifc))==i ]
    # Ooops making the interfaces unique breaks other things downstream :(
    interface.classImplementsOnly(ctx.domain_model, *implements)
    setattr(ctx.interface_module, interface_name, domain_interface)
    ctx.domain_interface = domain_interface
def map_to(self, attrname, tablename=None, selectable=None,
           schema=None, base=None, mapper_args=util.immutabledict()):
    """Configure a mapping to the given attrname.

    This is the "master" method that can be used to create any
    configuration.

    (new in 0.6.6)

    :param attrname: String attribute name which will be
      established as an attribute on this :class:.`.SqlSoup`
      instance.
    :param base: a Python class which will be used as the
      base for the mapped class. If ``None``, the "base"
      argument specified by this :class:`.SqlSoup`
      instance's constructor will be used, which defaults to
      ``object``.
    :param mapper_args: Dictionary of arguments which will
      be passed directly to :func:`.orm.mapper`.
    :param tablename: String name of a :class:`.Table` to be
      reflected. If a :class:`.Table` is already available,
      use the ``selectable`` argument.  This argument is
      mutually exclusive versus the ``selectable`` argument.
    :param selectable: a :class:`.Table`, :class:`.Join`, or
      :class:`.Select` object which will be mapped.  This
      argument is mutually exclusive versus the ``tablename``
      argument.
    :param schema: String schema name to use if the
      ``tablename`` argument is present.

    :raises InvalidRequestError: if *attrname* is already mapped.
    :raises ArgumentError: on invalid/conflicting arguments.
    :raises PKNotFoundError: if the target has no primary key.
    """
    # NOTE: the immutabledict default is safe as a default argument
    # precisely because it is immutable.
    if attrname in self._cache:
        raise InvalidRequestError(
            "Attribute '%s' is already mapped to '%s'" % (
                attrname,
                class_mapper(self._cache[attrname]).mapped_table
            ))
    if tablename is not None:
        if not isinstance(tablename, str):
            raise ArgumentError("'tablename' argument must be a string.")
        if selectable is not None:
            raise ArgumentError("'tablename' and 'selectable' "
                                "arguments are mutually exclusive")
        # Reflect the table from the database on demand.
        selectable = Table(tablename,
                           self._metadata,
                           autoload=True,
                           autoload_with=self.bind,
                           schema=schema or self.schema)
    elif schema:
        raise ArgumentError("'tablename' argument is required when "
                            "using 'schema'.")
    elif selectable is not None:
        if not isinstance(selectable, expression.FromClause):
            raise ArgumentError("'selectable' argument must be a "
                                "table, select, join, or other "
                                "selectable construct.")
    else:
        raise ArgumentError("'tablename' or 'selectable' argument is "
                            "required.")
    # A primary key is required for ORM mapping.
    if not selectable.primary_key.columns:
        if tablename:
            raise PKNotFoundError(
                "table '%s' does not have a primary "
                "key defined" % tablename)
        else:
            raise PKNotFoundError(
                "selectable '%s' does not have a primary "
                "key defined" % selectable)
    mapped_cls = _class_for_table(
        self.session,
        self.engine,
        selectable,
        base or self.base,
        mapper_args
    )
    self._cache[attrname] = mapped_cls
    return mapped_cls
def get_column_types(self):
    """Map each non-ignored mapped column name to its Python type."""
    type_by_name = {}
    for column in class_mapper(self.model).columns:
        if column.name in self.ignore_columns:
            continue
        type_by_name[column.name] = column.type.python_type
    return type_by_name
def relate(cls, propname, *args, **kwargs):
    """Attach a relationship named *propname* to the mapper of *cls*.

    All extra positional and keyword arguments are forwarded to
    ``relationship()``.  NOTE(review): ``_configure_property`` is private
    SQLAlchemy API.
    """
    rel = relationship(*args, **kwargs)
    mapper = class_mapper(cls)
    mapper._configure_property(propname, rel)
def serialize_model(model):
    """Return a plain dict of the model's mapped column keys and values."""
    from sqlalchemy.orm import class_mapper
    serialized = {}
    for column in class_mapper(model.__class__).columns:
        serialized[column.key] = getattr(model, column.key)
    return serialized