def test_deserialize(self):
    User = self.classes.User
    Address = self.classes.Address
    umapper = inspect(self.classes.User)
    amapper = inspect(self.classes.Address)

    p1 = PathRegistry.coerce(
        (umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)
    )
    p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
    p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))

    eq_(
        PathRegistry.deserialize([(User, "addresses"), (Address, "email_address")]),
        p1,
    )
    eq_(PathRegistry.deserialize([(User, "addresses"), (Address, None)]), p2)
    eq_(PathRegistry.deserialize([(User, "addresses")]), p3)

def test_remove_offline_entities():
    server1 = factories.Server(online=False,
                               offline_since=datetime.now() - timedelta(minutes=5))
    server2 = factories.Server(online=False,
                               offline_since=datetime.now() - timedelta(minutes=20))
    # went offline 2 hours ago, but is online again, thus it should not be deleted
    server3 = factories.Server(online=True,
                               offline_since=datetime.now() - timedelta(hours=2))
    player1 = factories.Player(server=server1, online=True)  # by mistake still set to be online
    player2 = factories.Player(server=server2)
    player3 = factories.Player(server=server3, online=True)
    player4 = factories.Player(server=server3, online=False)
    db_session.add_all([server1, server2, server3])
    db_session.commit()

    settings.KEEP_OFFLINE_SERVERS_FOR_MINUTES = 10
    models.remove_offline_entities()
    assert not inspect(server1).was_deleted
    assert inspect(server2).was_deleted
    assert not inspect(server3).was_deleted
    assert server3.offline_since is None
    assert inspect(player1).was_deleted
    assert inspect(player2).was_deleted
    assert not inspect(player3).was_deleted
    assert inspect(player4).was_deleted

    settings.KEEP_OFFLINE_SERVERS_FOR_MINUTES = 0
    models.remove_offline_entities()
    assert inspect(server1).was_deleted
    assert not inspect(server3).was_deleted
    assert not inspect(player3).was_deleted

def test_relations_cascade(self):
    from sqlalchemy import inspect
    from ..models import Group, User

    group = self._make(Group(name="foo"))
    user1 = self._make(
        User(
            username="******",
            encrypted_password="******",
            ident="fooident",
            name="Nameless Foo",
            groups=[group],
        )
    )
    user2 = self._make(
        User(
            username="******",
            encrypted_password="******",
            ident="foo2ident",
            name="Nameless Foo2",
            groups=[group],
        )
    )
    self.dbsession.commit()
    self.dbsession.delete(group)
    self.dbsession.commit()
    self.assertTrue(inspect(group).was_deleted)
    self.assertFalse(inspect(user1).was_deleted)
    self.assertFalse(inspect(user2).was_deleted)

def test_indexed_key(self):
    umapper = inspect(self.classes.User)
    amapper = inspect(self.classes.Address)
    path = PathRegistry.coerce(
        (umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)
    )
    eq_(path[1], umapper.attrs.addresses)
    eq_(path[3], amapper.attrs.email_address)

def test_eq(self):
    umapper = inspect(self.classes.User)
    amapper = inspect(self.classes.Address)
    u_alias = inspect(aliased(self.classes.User))

    p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
    p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
    p3 = PathRegistry.coerce((umapper, umapper.attrs.name))
    p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
    p5 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
    p6 = PathRegistry.coerce(
        (amapper, amapper.attrs.user, umapper, umapper.attrs.addresses))
    p7 = PathRegistry.coerce(
        (amapper, amapper.attrs.user, umapper,
         umapper.attrs.addresses, amapper, amapper.attrs.email_address))

    is_(p1 == p2, True)
    is_(p1 == p3, False)
    is_(p1 == p4, False)
    is_(p1 == p5, False)
    is_(p6 == p7, False)
    is_(p6 == p7.parent.parent, True)

    is_(p1 != p2, False)
    is_(p1 != p3, True)
    is_(p1 != p4, True)
    is_(p1 != p5, True)

def for_manager(self, manager, cls):
    strategy = manager.option(cls, 'strategy')
    operation_type_column = manager.option(
        cls, 'operation_type_column_name'
    )
    excluded_columns = [
        c.name for c in sa.inspect(cls).columns
        if manager.is_excluded_column(cls, c)
    ]
    return self(
        update_validity_for_tables=(
            sa.inspect(cls).tables if strategy == 'validity' else []
        ),
        version_table_name_format=manager.option(cls, 'table_name'),
        operation_type_column_name=operation_type_column,
        transaction_column_name=manager.option(
            cls, 'transaction_column_name'
        ),
        end_transaction_column_name=manager.option(
            cls, 'end_transaction_column_name'
        ),
        use_property_mod_tracking=uses_property_mod_tracking(manager),
        excluded_columns=excluded_columns,
        table=cls.__table__,
        extension_schema=manager.option(cls, 'extension_schema')
    )

def test_indexed_entity(self):
    umapper = inspect(self.classes.User)
    amapper = inspect(self.classes.Address)
    path = PathRegistry.coerce(
        (umapper, umapper.attrs.addresses, amapper, amapper.attrs.email_address)
    )
    is_(path[0], umapper)
    is_(path[2], amapper)

def test_info(self):
    A = self._fixture()
    inspect(A).all_orm_descriptors.value.info["some key"] = "some value"
    eq_(
        inspect(A).all_orm_descriptors.value.info,
        {"some key": "some value"},
    )

def test_attrs_props_prop_added_after_configure(self):
    class AnonClass(object):
        pass

    from sqlalchemy.orm import mapper, column_property
    from sqlalchemy.ext.hybrid import hybrid_property

    m = mapper(AnonClass, self.tables.users)

    eq_(
        set(inspect(AnonClass).attrs.keys()),
        set(['id', 'name']))
    eq_(
        set(inspect(AnonClass).all_orm_descriptors.keys()),
        set(['id', 'name']))

    m.add_property('q', column_property(self.tables.users.c.name))

    def desc(self):
        return self.name
    AnonClass.foob = hybrid_property(desc)

    eq_(
        set(inspect(AnonClass).attrs.keys()),
        set(['id', 'name', 'q']))
    eq_(
        set(inspect(AnonClass).all_orm_descriptors.keys()),
        set(['id', 'name', 'q', 'foob']))

def inheritance_args(self, cls, version_table, table):
    """
    Return mapper inheritance args for currently built history model.
    """
    args = {}

    if not sa.inspect(self.model).single:
        parent = find_closest_versioned_parent(self.manager, self.model)
        if parent:
            # The version classes do not contain foreign keys, hence we
            # need to map inheritance condition manually for classes that
            # use joined table inheritance
            if parent.__table__.name != table.name:
                mapper = sa.inspect(self.model)
                reflector = VersionExpressionParser()
                inherit_condition = reflector(mapper.inherit_condition)
                tx_column_name = self.manager.options["transaction_column_name"]
                args["inherit_condition"] = sa.and_(
                    inherit_condition,
                    getattr(parent.__table__.c, tx_column_name) ==
                    getattr(cls.__table__.c, tx_column_name),
                )
                args["inherit_foreign_keys"] = [
                    version_table.c[column.key]
                    for column in sa.inspect(self.model).columns
                    if column.primary_key
                ]

    args.update(copy_mapper_args(self.model))
    return args

def get_parent(self, attr_name):
    # first, try grabbing it directly
    parent = getattr(self, attr_name)
    if parent:
        return parent

    # if nothing was found, grab the fk and lookup manually
    mapper = inspect(type(self))
    attr = getattr(type(self), attr_name)
    prop = attr.property
    local_col, remote_col = prop.local_remote_pairs[0]
    local_prop = mapper.get_property_by_column(local_col)
    value = getattr(self, local_prop.key)
    if not value:
        # no relation and no fk = no parent
        return None

    parent_cls = type(self).get_related_class(attr_name)
    mapper = inspect(parent_cls)
    remote_prop = mapper.get_property_by_column(remote_col)
    filters = {remote_prop.key: value}

    orm = ORM.get()
    session = orm.sessionmaker()
    parent = session.query(parent_cls).filter_by(**filters).first()
    return parent

def log_activity(self, sender, actor, verb, object, target=None):
    assert self.running
    if not isinstance(object, Entity):
        # generic forms may send signals unconditionally. For now we have
        # activity only for Entities
        return

    session = object_session(object)
    kwargs = dict(actor=actor, verb=verb, object_type=object.entity_type)

    if sa.inspect(object).deleted:
        # object is in deleted state: flush has occurred, don't reference it or
        # we'll have an error when adding entry to session
        kwargs['object_id'] = object.id
    else:
        kwargs['object'] = object

    if target is not None:
        kwargs['target_type'] = target.entity_type
        if sa.inspect(target).deleted:
            kwargs['target_id'] = target.id
        else:
            kwargs['target'] = target

    entry = ActivityEntry(**kwargs)
    entry.object_type = object.entity_type
    session.add(entry)

def create_highlight(self, **options):
    """
    Returns an error message (string) if something went wrong,
    otherwise returns True
    """
    if self.online is False or self.current_stream_chunk is None:
        return 'The stream is not online'

    if self.current_stream_chunk.video_url is None:
        return 'No video URL fetched for this chunk yet, try in 5 minutes'

    try:
        highlight = StreamChunkHighlight(self.current_stream_chunk, **options)

        session = DBManager.create_session(expire_on_commit=False)
        session.add(highlight)
        session.add(self.current_stream_chunk)
        session.commit()
        session.close()

        x = inspect(self.current_stream_chunk)
        log.info('{0.transient} - {0.pending} - {0.persistent} - {0.detached}'.format(x))
        x = inspect(highlight)
        log.info('{0.transient} - {0.pending} - {0.persistent} - {0.detached}'.format(x))
        x = inspect(self.current_stream)
        log.info('{0.transient} - {0.pending} - {0.persistent} - {0.detached}'.format(x))

        log.info(self.current_stream.id)
        log.info(highlight.id)
        log.info(self.current_stream_chunk.id)
    except Exception:  # bare `except:` would also swallow KeyboardInterrupt
        log.exception('uncaught exception in create_highlight')
        return 'Unknown reason, ask pajlada'

    return True

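# Caller sketch for create_highlight() above (`stream_manager` is a
# hypothetical instance, not part of the original snippet): since the method
# returns either an error string or True, and a non-empty error string is
# also truthy, callers should compare against True explicitly:
#
#     res = stream_manager.create_highlight(title='Nice play')
#     if res is not True:
#         log.warning('could not create highlight: %s', res)
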
def test_plain_aliased_compound(self):
    Company = _poly_fixtures.Company
    Person = _poly_fixtures.Person
    Engineer = _poly_fixtures.Engineer
    cmapper = inspect(Company)
    emapper = inspect(Engineer)

    c_alias = aliased(Company)
    p_alias = aliased(Person)
    c_alias = inspect(c_alias)
    p_alias = inspect(p_alias)

    p1 = PathRegistry.coerce(
        (c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines)
    )
    # plain AliasedClass - the path keeps that AliasedClass directly
    # as is in the path
    eq_(
        p1.path,
        (
            c_alias,
            cmapper.attrs.employees,
            p_alias,
            emapper.attrs.machines,
        ),
    )

def get_tables(mixed):
    """
    Return a list of tables associated with given SQLAlchemy object.

    Let's say we have three classes which use joined table inheritance
    TextItem, Article and BlogPost. Article and BlogPost inherit TextItem.

    ::

        get_tables(Article)  # [Table('article', ...), Table('text_item')]

        get_tables(Article())

        get_tables(Article.__mapper__)

    .. versionadded: 0.26.0

    :param mixed:
        SQLAlchemy Mapper / Declarative class or a SA Alias object wrapping
        any of these objects.
    """
    if isinstance(mixed, sa.orm.util.AliasedClass):
        mapper = sa.inspect(mixed).mapper
    else:
        if not isclass(mixed):
            mixed = mixed.__class__
        mapper = sa.inspect(mixed)
    return mapper.tables

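# Minimal sketch of the joined-table-inheritance setup the get_tables()
# docstring above describes. TextItem/Article are illustrative stand-ins,
# and the declarative_base import path assumes SQLAlchemy 1.4+:
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class TextItem(Base):
    __tablename__ = 'text_item'
    id = sa.Column(sa.Integer, primary_key=True)
    type = sa.Column(sa.String(50))
    __mapper_args__ = {'polymorphic_identity': 'text_item', 'polymorphic_on': type}

class Article(TextItem):
    __tablename__ = 'article'
    id = sa.Column(sa.Integer, sa.ForeignKey('text_item.id'), primary_key=True)
    __mapper_args__ = {'polymorphic_identity': 'article'}

# Mapper.tables lists every table the mapper is aware of, so Article maps
# to both its own table and the inherited base table:
print(sa.inspect(Article).tables)  # [Table('text_item', ...), Table('article', ...)]
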
def _assert_option(self, entity_list, option):
    Item = self.classes.Item

    q = create_session().query(*entity_list).\
        options(joinedload(option))
    key = ('loader', (inspect(Item), inspect(Item).attrs.keywords))
    assert key in q._attributes

def test_length(self):
    umapper = inspect(self.classes.User)
    amapper = inspect(self.classes.Address)

    pneg1 = PathRegistry.coerce(())
    p0 = PathRegistry.coerce((umapper,))
    p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
    p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
    p3 = PathRegistry.coerce(
        (
            umapper,
            umapper.attrs.addresses,
            amapper,
            amapper.attrs.email_address,
        )
    )

    eq_(len(pneg1), 0)
    eq_(len(p0), 1)
    eq_(len(p1), 2)
    eq_(len(p2), 3)
    eq_(len(p3), 4)
    eq_(pneg1.length, 0)
    eq_(p0.length, 1)
    eq_(p1.length, 2)
    eq_(p2.length, 3)
    eq_(p3.length, 4)

def test_join_query(self):
    """ Test join(dict) """
    ssn = self.Session()

    # Test: join() with comments as dict
    user = models.User.mongoquery(ssn).filter({"id": 1}).join({"comments": None}).end().one()
    self.assertEqual(user.id, 1)
    self.assertEqual(inspect(user).unloaded, {"articles"})

    ssn.close()  # need to reset the session: it caches entities and gives bad results

    # Test: join() with filtered articles
    user = (
        models.User.mongoquery(ssn)
        .filter({"id": 1})
        .join({"articles": {"project": ["id", "title"], "filter": {"id": 10}, "limit": 1}})
        .end()
        .one()
    )
    self.assertEqual(user.id, 1)
    self.assertEqual(inspect(user).unloaded, {"comments"})
    self.assertEqual([10], [a.id for a in user.articles])  # Only one article! :)
    self.assertEqual(
        inspect(user.articles[0]).unloaded, {"user", "comments", "uid", "data"}
    )  # No relationships loaded, and projection worked

def test_alembic_revision_fddb3cfe7a9c(alembic_app):
    ext = alembic_app.extensions['invenio-db']
    if db.engine.name == 'sqlite':
        pytest.skip('Upgrades are not supported on SQLite.')

    db.drop_all()
    drop_alembic_version_table()

    inspector = inspect(db.engine)
    assert 'inspire_prod_records' not in inspector.get_table_names()
    assert 'workflows_audit_logging' not in inspector.get_table_names()
    assert 'workflows_pending_record' not in inspector.get_table_names()

    ext.alembic.upgrade(target='fddb3cfe7a9c')
    inspector = inspect(db.engine)
    assert 'inspire_prod_records' in inspector.get_table_names()
    assert 'workflows_audit_logging' in inspector.get_table_names()
    assert 'workflows_pending_record' in inspector.get_table_names()

    ext.alembic.downgrade(target='a82a46d12408')
    inspector = inspect(db.engine)
    assert 'inspire_prod_records' not in inspector.get_table_names()
    assert 'workflows_audit_logging' not in inspector.get_table_names()
    assert 'workflows_pending_record' not in inspector.get_table_names()

    drop_alembic_version_table()

def test_insert_and_object_states(caplog):
    engine = create_engine('sqlite:///:memory:', echo=True)
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    user = User(name='Jeremy', fullname='Jeremy Kao')
    assert user not in session
    assert inspect(user).transient

    session.add(user)
    assert inspect(user).pending
    assert user.id is None

    caplog.clear()
    session.commit()
    sqls = [r.message for r in caplog.records]

    assert inspect(user).persistent
    assert user.id == 1
    assert sqls == [
        'BEGIN (implicit)',
        'INSERT INTO user (name, fullname) VALUES (?, ?)',
        "('Jeremy', 'Jeremy Kao')",
        'COMMIT',
    ]

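# Follow-up sketch to the state walkthrough above (hypothetically reusing its
# `session` and `user`): the fourth state, detached, is reached once a
# persistent object leaves its Session:
#
#     session.expunge(user)          # or session.close()
#     assert inspect(user).detached  # keeps its identity, but has no Session
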
def handle_cycle_task_group_object_task_put(
        sender, obj=None, src=None, service=None):  # noqa pylint: disable=unused-argument
    if inspect(obj).attrs.status.history.has_changes():
        # TODO: check why update_cycle_object_parent_state destroys object history
        # when accepting the only task in a cycle. The listener below is a
        # workaround because of that.
        Signals.status_change.send(
            obj.__class__,
            objs=[
                Signals.StatusChangeSignalObjectContext(
                    instance=obj,
                    new_status=obj.status,
                    old_status=inspect(obj).attrs.status.history.deleted[0],
                )
            ]
        )

    # Doing this regardless of status.history.has_changes() is important in order
    # to update objects that have been declined. It updates the os_last_updated
    # date and last_updated_by.
    with benchmark("handle CycleTask put"):
        if getattr(obj.task_group_task, 'object_approval', None):
            for tgobj in obj.task_group_task.task_group.objects:
                if obj.status == 'Verified':
                    tgobj.modified_by = get_current_user()
                    tgobj.set_reviewed_state()
            db.session.flush()

def _get_person_link(self, data, extra_data=None):
    extra_data = extra_data or {}
    person = get_event_person(self.event, data,
                              create_untrusted_persons=self.create_untrusted_persons,
                              allow_external=True)
    person_data = {'title': next((x.value for x in UserTitle
                                  if data.get('title') == orig_string(x.title)), UserTitle.none),
                   'first_name': data.get('firstName', ''),
                   'last_name': data['familyName'],
                   'affiliation': data.get('affiliation', ''),
                   'address': data.get('address', ''),
                   'phone': data.get('phone', ''),
                   'display_order': data['displayOrder']}
    person_data.update(extra_data)
    person_link = None
    if self.object and inspect(person).persistent:
        person_link = self.person_link_cls.find_first(person=person, object=self.object)
    if not person_link:
        person_link = self.person_link_cls(person=person)
    person_link.populate_from_dict(person_data)
    email = data.get('email', '').lower()
    if email != person_link.email:
        if not self.event or not self.event.persons.filter_by(email=email).first():
            person_link.person.email = email
            person_link.person.user = get_user_by_email(email)
            if inspect(person).persistent:
                signals.event.person_updated.send(person_link.person)
        else:
            raise UserValueError(_('There is already a person with the email {}').format(email))
    return person_link

def test_postload_immutability(self):
    i1 = IdOnly(is_regular=1, is_immutable=2, is_cached=3)
    i2 = IdUuid(is_regular='a', is_immutable='b', is_cached='c')
    i3 = UuidOnly(is_regular='x', is_immutable='y', is_cached='z')
    self.session.add_all([i1, i2, i3])
    self.session.commit()

    id1 = i1.id
    id2 = i2.id
    id3 = i3.id

    # Delete objects so SQLAlchemy's session cache can't populate fields from them
    del i1, i2, i3

    # Using `query.get` appears to ignore the `load_only` option,
    # so we use `query.filter_by`
    pi1 = IdOnly.query.options(db.load_only('id')).filter_by(id=id1).one()
    pi2 = IdUuid.query.options(db.load_only('id')).filter_by(id=id2).one()
    pi3 = UuidOnly.query.options(db.load_only('id')).filter_by(id=id3).one()

    # Confirm there is no value for is_immutable
    self.assertIs(inspect(pi1).attrs.is_immutable.loaded_value, NO_VALUE)
    self.assertIs(inspect(pi2).attrs.is_immutable.loaded_value, NO_VALUE)
    self.assertIs(inspect(pi3).attrs.is_immutable.loaded_value, NO_VALUE)

    # Immutable columns are immutable even if not loaded
    with self.assertRaises(ImmutableColumnError):
        pi1.is_immutable = 20
    with self.assertRaises(ImmutableColumnError):
        pi2.is_immutable = 'bb'
    with self.assertRaises(ImmutableColumnError):
        pi3.is_immutable = 'yy'

def add_superclass_path(self, column, cls, alias_maker):
    path = []
    for i, sup in enumerate(sqla_inheritance_with_conditions(cls)):
        # if getattr(inspect(sup), 'local_table', None) is None:
        #     continue
        condition = inspect(cls).inherit_condition
        if condition is not None:
            alias_maker.add_condition(condition)
        if i:
            path.append(SuperClassRelationship(sup, cls))
        cls = sup
        alias_maker.alias_from_relns(*path)
        if _columnish(column):
            local_keys = {c.key for c in inspect(cls).local_table.columns}
            if column.key in local_keys:
                column = getattr(cls, column.key)
                return column
        elif _propertish(column):
            if isinstance(column, InstrumentedAttribute):
                column = column.impl.parent_token
            if column.parent == inspect(cls):
                return column
        else:
            assert False, "what is this column?"
    else:
        assert False, "The column is found in the "\
            "class and not in superclasses?"

def handle_cycle_task_group_object_task_put(
        sender, obj=None, src=None, service=None):  # noqa pylint: disable=unused-argument
    if inspect(obj).attrs.contact.history.has_changes():
        ensure_assignee_is_workflow_member(obj.cycle.workflow, obj.contact)

    if any([inspect(obj).attrs.start_date.history.has_changes(),
            inspect(obj).attrs.end_date.history.has_changes()]):
        update_cycle_dates(obj.cycle)

    if inspect(obj).attrs.status.history.has_changes():
        # TODO: check why update_cycle_object_parent_state destroys object history
        # when accepting the only task in a cycle. The listener below is a
        # workaround because of that.
        Signals.status_change.send(
            obj.__class__,
            obj=obj,
            new_status=obj.status,
            old_status=inspect(obj).attrs.status.history.deleted.pop(),
        )
        update_cycle_task_object_task_parent_state(obj)

    # Doing this regardless of status.history.has_changes() is important in order
    # to update objects that have been declined. It updates the os_last_updated
    # date and last_updated_by.
    if getattr(obj.task_group_task, 'object_approval', None):
        for tgobj in obj.task_group_task.task_group.objects:
            if obj.status == 'Verified':
                tgobj.modified_by = get_current_user()
                tgobj.set_reviewed_state()
                db.session.add(tgobj)
        db.session.flush()

def _get_criteria(keys, class_, obj):
    criteria = []
    visited_constraints = []
    for key in keys:
        if key.constraint in visited_constraints:
            continue
        visited_constraints.append(key.constraint)

        subcriteria = []
        for index, column in enumerate(key.constraint.columns):
            prop = sa.inspect(class_).get_property_by_column(column)
            foreign_column = key.constraint.elements[index].column
            subcriteria.append(
                getattr(class_, prop.key) ==
                getattr(
                    obj,
                    sa.inspect(type(obj))
                    .get_property_by_column(foreign_column)
                    .key
                )
            )
        criteria.append(sa.and_(*subcriteria))
    return criteria

def extract_qmps(self, sqla_cls, subject_pattern, alias_maker, for_graph):
    mapper = inspect(sqla_cls)
    supercls = next(islice(
        sqla_inheritance_with_conditions(sqla_cls), 1, 2), None)
    if supercls:
        supermapper = inspect(supercls)
        super_props = set(chain(
            supermapper.columns, supermapper.relationships))
    else:
        super_props = set()
    for c in chain(mapper.columns, mapper.relationships):
        # Local columns only to avoid duplication
        if c in super_props:
            # But there are exceptions
            if not self.delayed_column(sqla_cls, c, for_graph):
                continue
            # in this case, make sure superclass is in aliases.
            c = self.add_superclass_path(c, sqla_cls, alias_maker)
        if 'rdf' in getattr(c, 'info', ()):
            from virtuoso.vmapping import QuadMapPattern
            qmp = c.info['rdf']
            if isinstance(qmp, QuadMapPattern):
                qmp = self.qmp_with_defaults(
                    qmp, subject_pattern, sqla_cls, alias_maker,
                    for_graph, c)
                if qmp is not None and qmp.graph_name == for_graph.name:
                    qmp.resolve(sqla_cls)
                    yield qmp

def handle_cycle_task_group_object_task_put(obj):
    if inspect(obj).attrs.contact.history.has_changes():
        add_cycle_task_reassigned_notification(obj)

    history = inspect(obj).attrs.end_date.history
    if not history.has_changes():
        return

    # NOTE: A history might "detect" a change even if end_date was not changed
    # due to different data types, i.e. date vs. datetime with the time part set
    # to zero. Example:
    #
    #   >>> datetime(2017, 5, 15, 0, 0) == date(2017, 5, 15)
    #   False
    #
    # We thus need to manually check both date values without the time part
    # in order to avoid unnecessary work and DB updates.
    old_date = history.deleted[0] if history.deleted else None
    new_date = history.added[0] if history.added else None

    if old_date is not None and new_date is not None:
        if isinstance(old_date, datetime):
            old_date = old_date.date()
        if isinstance(new_date, datetime):
            new_date = new_date.date()

        if old_date == new_date:
            return  # we have a false positive, no change actually occurred

    # the end date has actually changed, respond accordingly
    modify_cycle_task_end_date(obj)

def done(self, new_password, field_name='password'):
    self.user.set_password(new_password, field_name)
    if self.user.no_errors():
        self.dt_use = datetime.datetime.utcnow()
        inspect(self).session.commit()
    else:
        self.errors.extend(self.user.errors)

def test_indexed_key(self):
    umapper = inspect(self.classes.User)
    amapper = inspect(self.classes.Address)
    path = PathRegistry.coerce(
        (umapper, 'addresses', amapper, 'email_address'))
    eq_(path[1], 'addresses')
    eq_(path[3], 'email_address')

from sqlalchemy import Table, Column, Integer, String, MetaData, create_engine

meta = MetaData()
user = Table('user', meta,
             Column('id', Integer, primary_key=True),
             Column('name', String, nullable=False))

postgre_config = {
    'user': '******',
    'password': '******',
    'host': '10.122.27.44',
    'schema': 'postgres'
}
db_uri = 'postgresql://{user}:{password}@{host}/{schema}'.format(
    **postgre_config)
db = create_engine(db_uri)

# MetaData.reflect(): load table definitions from the connected database
print(user.metadata.tables)
user.metadata.reflect(bind=db)
print(user.metadata.tables)

# inspect(): examine table information on the connected database.
# Note: inspect() takes the engine positionally (there is no `bind=` keyword),
# and get_pk_constraint() replaces the long-deprecated get_primary_keys().
from sqlalchemy import inspect
inspector = inspect(db)
print(inspector.get_table_names())
print(inspector.get_columns('ab_user'))
print(inspector.get_pk_constraint('ab_user'))

def go():
    ma = sa.inspect(aliased(A))
    m1._path_registry[m1.attrs.bs][ma][m1.attrs.bar]

def go():
    u1 = aliased(User)
    inspect(u1)

def go():
    adapter = sql_util.ColumnAdapter(inspect(u1).selectable)
    adapter.columns[User.id]

def go():
    inspect(User)._path_registry[User.addresses.property][inspect(Address)]

def go():
    u1 = aliased(User)
    inspect(u1)._path_registry[User.addresses.property]

def test_get_schema_names(self):
    """ SQLAlchemy: Test get schema names """
    insp = inspect(self.engine)
    self.assertEqual(insp.get_schema_names(), ["default"])

def __prune_fields(self):
    column_attrs = inspect(self).mapper.column_attrs
    if not self._fields:
        # `columns` was undefined in the original; collect the mapped
        # column keys from the inspected mapper instead
        all_columns = set(attr.key for attr in column_attrs)
        self._fields = list(all_columns - set(self.__exclude))

import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect  # inspect was missing; used below
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)

engine = create_engine("sqlite:///db/StarsCrash.db", echo=False)
inspector = inspect(engine)
inspector.get_table_names()

columns = inspector.get_columns('traffic')
listcolumns = []
for c in columns:
    print(c['name'])
    listcolumns.append(c['name'])
print(listcolumns)

results = engine.execute('SELECT * FROM traffic LIMIT 100').fetchall()
df = pd.DataFrame(results, columns=listcolumns)
# print(df)
df = df.to_json(orient='index')

def inspect_search_vectors(entity):
    return [
        getattr(entity, key).property.columns[0]
        for key, column in sa.inspect(entity).columns.items()
        if isinstance(column.type, TSVectorType)
    ]

def mapper_primary_key(model_class):
    """Return primary keys of `model_class`."""
    try:
        return sa.inspect(model_class).primary_key
    except Exception:  # pragma: no cover
        pass

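# Usage sketch for mapper_primary_key() above; the User model is an
# illustrative stand-in (declarative_base import path assumes SQLAlchemy 1.4+):
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = sa.Column(sa.Integer, primary_key=True)

# Mapper.primary_key is a tuple of Column objects:
print([c.key for c in mapper_primary_key(User)])  # ['id']
# A non-mapped argument makes sa.inspect() raise; the except swallows it
# and the helper implicitly returns None:
print(mapper_primary_key(object()))  # None
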
        name='wally', status='CGG',
        engineer_name='engineer2',
        primary_language='python'),
    Manager(
        name='jsmith', status='ABA',
        manager_name='manager2')
])
session.add(c)
session.commit()

c = session.query(Company).get(1)
for e in c.employees:
    print(e, inspect(e).key, e.company)
assert set([e.name for e in c.employees]) == set(
    ['pointy haired boss', 'dilbert', 'joesmith', 'wally', 'jsmith'])
print("\n")

dilbert = session.query(Person).filter_by(name='dilbert').one()
dilbert2 = session.query(Engineer).filter_by(name='dilbert').one()
assert dilbert is dilbert2

dilbert.engineer_name = 'hes dilbert!'
session.commit()

c = session.query(Company).get(1)
for e in c.employees:
    print(e)

def __init__(self):
    self.engine = create_engine(get_database_url(), echo=False)
    self.session = sessionmaker(bind=self.engine)()
    if not inspect(self.engine).has_table(self.model.__tablename__):
        self.model.metadata.create_all(bind=self.engine)

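# Note on the snippet above: Inspector.has_table() on the object returned by
# inspect(engine) is available in SQLAlchemy 1.4+; on older versions the
# equivalent check is engine.dialect.has_table(conn, name) or the since-removed
# engine.has_table().
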
def object_as_dict(o):
    return {
        col.key: getattr(o, col.key)
        for col in inspect(o).mapper.column_attrs
    }

def go(fn, *args, **kw):
    try:
        return fn(*args, **kw)
    finally:
        drop_all_tables(
            config.db, inspect(config.db), include_names=names)

def make(self):
    ret = {}
    m = inspect(self)
    for c in m.attrs:
        ret[c.key] = None
    return ret

def _attrs(self):
    return [a.key for a in inspect(self.model).attrs if a.key != self.pk]

def object_as_dict(obj):
    '''Converts SQLAlchemy Query Results to Dict

    *Input: ORM Object
    *Output: Single Object as Dict
    '''
    return {c.key: getattr(obj, c.key)
            for c in inspect(obj).mapper.column_attrs}

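# Usage sketch for object_as_dict() above (hypothetical `User` model and
# `session`): only mapped columns appear in the result, because the dict is
# built from inspect(obj).mapper.column_attrs — relationships are skipped.
#
#     user = session.query(User).first()
#     object_as_dict(user)  # -> {'id': 1, 'name': 'Jeremy', ...}
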
def object_id(self):
    return '-'.join([str(_id) for _id in inspect(self.model).identity])

def test_foreign_key_option_inspection(self):
    metadata = self.metadata
    Table(
        'person', metadata,
        Column('id', String(length=32), nullable=False, primary_key=True),
        Column(
            'company_id',
            ForeignKey('company.id', name='person_company_id_fkey',
                       match='FULL', onupdate='RESTRICT',
                       ondelete='RESTRICT', deferrable=True,
                       initially='DEFERRED')))
    Table(
        'company', metadata,
        Column('id', String(length=32), nullable=False, primary_key=True),
        Column('name', String(length=255)),
        Column(
            'industry_id',
            ForeignKey(
                'industry.id',
                name='company_industry_id_fkey',
                onupdate='CASCADE', ondelete='CASCADE',
                deferrable=False,  # PG default
                initially='IMMEDIATE'  # PG default
            )))
    Table('industry', metadata,
          Column('id', Integer(), nullable=False, primary_key=True),
          Column('name', String(length=255)))
    fk_ref = {
        'person_company_id_fkey': {
            'name': 'person_company_id_fkey',
            'constrained_columns': ['company_id'],
            'referred_columns': ['id'],
            'referred_table': 'company',
            'referred_schema': None,
            'options': {
                'onupdate': 'RESTRICT',
                'deferrable': True,
                'ondelete': 'RESTRICT',
                'initially': 'DEFERRED',
                'match': 'FULL'
            }
        },
        'company_industry_id_fkey': {
            'name': 'company_industry_id_fkey',
            'constrained_columns': ['industry_id'],
            'referred_columns': ['id'],
            'referred_table': 'industry',
            'referred_schema': None,
            'options': {
                'onupdate': 'CASCADE',
                'deferrable': None,
                'ondelete': 'CASCADE',
                'initially': None,
                'match': None
            }
        }
    }
    metadata.create_all()
    inspector = inspect(testing.db)
    fks = inspector.get_foreign_keys('person') + \
        inspector.get_foreign_keys('company')
    for fk in fks:
        eq_(fk, fk_ref[fk['name']])

def object_as_dict(obj):
    return {
        c.key: getattr(obj, c.key)
        for c in inspect(obj).mapper.column_attrs
    }

def test_get_foreign_table_names(self):
    inspector = inspect(testing.db)
    with testing.db.connect() as conn:
        ft_names = inspector.get_foreign_table_names()
        eq_(ft_names, ['test_foreigntable'])

def execute_register(self, device: Device) -> Device:
    """Synchronizes one device to the DB.

    This method tries to get an existing device using the HID
    or one of the tags, and...

    - if it already exists it returns a "local synced version"
      –the same ``device`` you passed-in but with updated values
      from the database. In this case we do not "touch" any of
      its values on the DB.
    - If it did not exist, a new device is created in the db.

    This method validates that all passed-in tags (``device.tags``),
    if linked, are linked to the same device, ditto for the hid.
    Finally it links the tags with the device.

    If you pass-in a component that is inside a parent, use
    :meth:`.execute_register_component` as it has more specialized
    methods to handle them.

    :param device: The device to synchronize to the DB.
    :raise NeedsId: The device has not any identifier we can use.
                    To still create the device use ``force_creation``.
    :raise DatabaseError: Any other error from the DB.
    :return: The synced device from the db with the tags linked.
    """
    assert inspect(device).transient, 'Device cannot be already synced from DB'
    assert all(inspect(tag).transient for tag in device.tags), 'Tags cannot be synced from DB'
    db_device = None
    if device.hid:
        with suppress(ResourceNotFound):
            db_device = Device.query.filter_by(
                hid=device.hid, owner_id=g.user.id, active=True).one()
    if db_device and db_device.allocated:
        raise ResourceNotFound('device is actually allocated {}'.format(device))
    try:
        tags = {Tag.from_an_id(tag.id).one() for tag in device.tags}  # type: Set[Tag]
    except ResourceNotFound:
        raise ResourceNotFound('tag you are linking to device {}'.format(device))
    linked_tags = {tag for tag in tags if tag.device_id}  # type: Set[Tag]
    if linked_tags:
        sample_tag = next(iter(linked_tags))
        for tag in linked_tags:
            if tag.device_id != sample_tag.device_id:
                raise MismatchBetweenTags(tag, sample_tag)  # Tags linked to different devices
        if db_device:  # Device from hid
            if sample_tag.device_id != db_device.id:  # Device from hid != device from tags
                raise MismatchBetweenTagsAndHid(db_device.id, db_device.hid)
        else:  # There was no device from hid
            if sample_tag.device.physical_properties != device.physical_properties:
                # Incoming physical props of device != props from tag's device
                # which means that the devices are not the same
                raise MismatchBetweenProperties(sample_tag.device.physical_properties,
                                                device.physical_properties)
            db_device = sample_tag.device
    if db_device:  # Device from hid or tags
        self.merge(device, db_device)
    else:  # Device is new and tags are not linked to a device
        device.tags.clear()  # We don't want to add the transient dummy tags
        db.session.add(device)
        db_device = device
    db_device.tags |= tags  # Union of tags the device had plus the (potentially) new ones
    try:
        db.session.flush()
    except IntegrityError as e:
        # Manage 'one tag per organization' unique constraint
        if 'One tag per organization' in e.args[0]:  # todo test for this
            id = int(e.args[0][135:e.args[0].index(',', 135)])
            raise ValidationError('The device is already linked to tag {} '
                                  'from the same organization.'.format(id),
                                  field_names=['device.tags'])
        else:
            raise
    assert db_device is not None
    return db_device

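# Usage sketch for execute_register() above (Device, Tag and the sync object
# come from the surrounding app; this exact call shape is an assumption): the
# passed-in device must still be transient, i.e. never added to a Session.
#
#     device = Device(hid='some-hid', tags={Tag(id='tag-1')})
#     assert inspect(device).transient
#     synced = sync.execute_register(device)
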
def test_get_view_names(self):
    insp = inspect(testing.db)
    eq_(set(insp.get_view_names()),
        set(['test_mview', 'test_regview']))

def test_get_table_names_no_foreign(self):
    inspector = inspect(testing.db)
    with testing.db.connect() as conn:
        names = inspector.get_table_names()
        eq_(names, ['testtable'])

def to_dict(self):
    return {
        c.key: getattr(self, c.key)
        for c in inspect(self).mapper.column_attrs
    }

def test_get_view_definition(self):
    insp = inspect(testing.db)
    eq_(
        re.sub(r'[\n\t ]+', ' ',
               insp.get_view_definition("test_mview").strip()),
        "SELECT testtable.id, testtable.data FROM testtable;")

def _protection_changed(sender, obj, **kwargs):
    if not inspect(obj).persistent:
        return
    _register_change(obj, ChangeType.protection_changed)

def intercept_status_set(target, newvalue, oldvalue, _):
    """Intercept status transitions."""
    # will catch updates in detached state and again when we merge it into session
    if not inspect(target).detached and oldvalue != newvalue:
        target.logger.info("Parametric job %d.%d transitioned from status %s to %s",
                           target.request_id, target.id, oldvalue.name, newvalue.name)