def test_o2m_flush_size(self):
        mapper(User, users, properties={
            'addresses':relationship(Address),
        })
        mapper(Address, addresses)

        sess = create_session()
        u1 = User(name='ed')
        sess.add(u1)
        self._assert_uow_size(sess, 2)

        sess.flush()

        u1.name='jack'

        self._assert_uow_size(sess, 2)
        sess.flush()

        a1 = Address(email_address='foo')
        sess.add(a1)
        sess.flush()

        u1.addresses.append(a1)

        self._assert_uow_size(sess, 6)

        sess.flush()

        sess = create_session()
        u1 = sess.query(User).first()
        u1.name='ed'
        self._assert_uow_size(sess, 2)

        u1.addresses  # access the collection to trigger the lazy load
        self._assert_uow_size(sess, 6)

    def test_identity_conflict(self):
        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        for s in (
            create_session(),
            create_session(weak_identity_map=False),
        ):
            users.delete().execute()
            u1 = User(name="ed")
            s.add(u1)
            s.flush()
            s.expunge(u1)
            u2 = s.query(User).first()
            s.expunge(u2)
            s.identity_map.add(sa.orm.attributes.instance_state(u1))

            assert_raises_message(
                sa.exc.InvalidRequestError,
                "Can't attach instance <User.*?>; another instance "
                "with key .*? is already "
                "present in this session.",
                s.identity_map.add,
                sa.orm.attributes.instance_state(u2)
            )

    def test_singlecycle_flush_size(self):
        Node, nodes = self.classes.Node, self.tables.nodes

        mapper(Node, nodes, properties={
            'children':relationship(Node)
        })
        sess = create_session()
        n1 = Node(data='ed')
        sess.add(n1)
        self._assert_uow_size(sess, 2)

        sess.flush()

        n1.data='jack'

        self._assert_uow_size(sess, 2)
        sess.flush()

        n2 = Node(data='foo')
        sess.add(n2)
        sess.flush()

        n1.children.append(n2)

        self._assert_uow_size(sess, 3)

        sess.flush()

        sess = create_session()
        n1 = sess.query(Node).first()
        n1.data='ed'
        self._assert_uow_size(sess, 2)

        n1.children  # access the collection to trigger the lazy load
        self._assert_uow_size(sess, 2)
Example #4
    def test_one_to_one_cascade(self):
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )

        mapper(User, users, properties={"address": relationship(mapper(Address, addresses), uselist=False)})
        load = self.load_tracker(User)
        self.load_tracker(Address, load)
        sess = create_session()

        u = User()
        u.id = 7
        u.name = "fred"
        a1 = Address()
        a1.email_address = "*****@*****.**"
        u.address = a1

        sess.add(u)
        sess.flush()

        eq_(load.called, 0)

        sess2 = create_session()
        u2 = sess2.query(User).get(7)
        eq_(load.called, 1)
        u2.name = "fred2"
        u2.address.email_address = "*****@*****.**"
        eq_(load.called, 2)

        u3 = sess.merge(u2)
        eq_(load.called, 2)
        assert u3 is u
Example #5
    def test_deferred_cols_missing_in_load_state_reset(self):
        Data = self.classes.DataDefer

        sess = create_session()

        d1 = Data(data='d1')
        sess.add(d1)
        sess.flush()
        sess.close()

        sess = create_session()
        d1 = sess.query(Data).from_statement(
                        select([Data.id])).options(undefer(Data.data)).first()
        d1.data = 'd2'

        # the deferred loader has to clear out any state
        # on the col, including that 'd2' here
        d1 = sess.query(Data).populate_existing().first()

        def go():
            eq_(d1.data, 'd1')

        self.assert_sql_count(
            testing.db, go, 1
        )

    def test_joinedload_on_subclass(self):
        sess = create_session()
        expected = [
            Engineer(
                name="dilbert",
                engineer_name="dilbert",
                primary_language="java",
                status="regular engineer",
                machines=[
                    Machine(name="IBM ThinkPad"),
                    Machine(name="IPhone")])]

        def go():
            # test load People with joinedload to engineers + machines
            eq_(sess.query(Person)
                    .with_polymorphic('*')
                    .options(joinedload(Engineer.machines))
                    .filter(Person.name == 'dilbert').all(),
              expected)
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()
        def go():
            # test load People with subqueryload to engineers + machines
            eq_(sess.query(Person)
                    .with_polymorphic('*')
                    .options(subqueryload(Engineer.machines))
                    .filter(Person.name == 'dilbert').all(),
              expected)
        self.assert_sql_count(testing.db, go, 2)

    def test_relationship_to_polymorphic_three(self):
        expected = self._company_with_emps_machines_fixture()
        sess = create_session()
        def go():
            eq_(sess.query(Company)
                    .options(subqueryload_all(
                        Company.employees.of_type(Engineer),
                        Engineer.machines))
                    .all(),
                expected)

        # the old case where subqueryload_all
        # didn't work with of_type
        #count = { '':8, 'Joins':4, 'Unions':4, 'Polymorphic':3,
        #    'AliasedJoins':4}[self.select_type]

        # query one is company->Person/Engineer->Machines
        # query two is Person/Engineer subq
        # query three is Machines subq
        # (however this test can't tell if the Q was a
        # lazyload or subqload ...)
        # query four is managers + boss for row #3
        # query five is managers for row #4
        count = 5
        self.assert_sql_count(testing.db, go, count)
Example #8
    def test_no_load_with_backrefs(self):
        """load=False populates relationships in both
        directions without requiring a load"""

        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )

        mapper(User, users, properties={"addresses": relationship(mapper(Address, addresses), backref="user")})

        u = User(id=7, name="fred", addresses=[Address(email_address="ad1"), Address(email_address="ad2")])
        sess = create_session()
        sess.add(u)
        sess.flush()
        sess.close()
        assert "user" in u.addresses[1].__dict__

        sess = create_session()
        u2 = sess.merge(u, load=False)
        assert "user" in u2.addresses[1].__dict__
        eq_(u2.addresses[1].user, User(id=7, name="fred"))

        sess.expire(u2.addresses[1], ["user"])
        assert "user" not in u2.addresses[1].__dict__
        sess.close()

        sess = create_session()
        u = sess.merge(u2, load=False)
        assert "user" not in u.addresses[1].__dict__
        eq_(u.addresses[1].user, User(id=7, name="fred"))
Example #9
    def test_polymorphic_deferred(self):
        email_users, users = (self.tables.email_users, self.tables.users)

        mapper(
            User,
            users,
            polymorphic_identity="user",
            polymorphic_on=users.c.type,
        )
        mapper(
            EmailUser,
            email_users,
            inherits=User,
            polymorphic_identity="emailuser",
        )

        eu = EmailUser(name="user1", email_address="*****@*****.**")
        sess = create_session()
        sess.add(eu)
        sess.flush()
        sess.expunge_all()

        eu = sess.query(User).first()
        eu2 = pickle.loads(pickle.dumps(eu))
        sess2 = create_session()
        sess2.add(eu2)
        assert "email_address" not in eu2.__dict__
        eq_(eu2.email_address, "*****@*****.**")

    def test_versioncheck(self):
        """query.with_lockmode performs a 'version check' on an already loaded instance"""

        s1 = create_session(autocommit=False)

        mapper(Foo, version_table, version_id_col=version_table.c.version_id)
        f1s1 = Foo(value='f1 value')
        s1.add(f1s1)
        s1.commit()

        s2 = create_session(autocommit=False)
        f1s2 = s2.query(Foo).get(f1s1.id)
        f1s2.value='f1 new value'
        s2.commit()

        # load, version is wrong
        assert_raises_message(
                sa.orm.exc.StaleDataError, 
                r"Instance .* has version id '\d+' which does not "
                r"match database-loaded version id '\d+'",
                s1.query(Foo).with_lockmode('read').get, f1s1.id
            )

        # reload it - this expires the old version first
        s1.refresh(f1s1, lockmode='read')

        # now assert version OK
        s1.query(Foo).with_lockmode('read').get(f1s1.id)

        # assert brand new load is OK too
        s1.close()
        s1.query(Foo).with_lockmode('read').get(f1s1.id)
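An aside, not part of the original test: with_lockmode() and the lockmode
argument to Session.refresh() are the pre-0.9 spellings; later SQLAlchemy
releases express the same version check through with_for_update(). A hedged
sketch reusing the names above (the refresh keyword assumes SQLAlchemy 1.2+):

    s1.query(Foo).with_for_update(read=True).get(f1s1.id)
    s1.refresh(f1s1, with_for_update={'read': True})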
Example #11
    def test_one_to_one_cascade(self):

        mapper(User, users, properties={
            'address':relationship(mapper(Address, addresses),uselist = False)
        })
        on_load = self.on_load_tracker(User)
        self.on_load_tracker(Address, on_load)
        sess = create_session()

        u = User()
        u.id = 7
        u.name = "fred"
        a1 = Address()
        a1.email_address='*****@*****.**'
        u.address = a1

        sess.add(u)
        sess.flush()

        eq_(on_load.called, 0)

        sess2 = create_session()
        u2 = sess2.query(User).get(7)
        eq_(on_load.called, 1)
        u2.name = 'fred2'
        u2.address.email_address = '*****@*****.**'
        eq_(on_load.called, 2)

        u3 = sess.merge(u2)
        eq_(on_load.called, 2)
        assert u3 is u
Example #12
    def test_dontload_with_eager(self):
        """

        This test illustrates that with load=False, we can't just copy the
        committed_state of the merged instance over, since it references
        collection objects which themselves are to be merged.  This
        committed_state would instead need to be piecemeal 'converted' to
        represent the correct objects.  However, at the moment I'd rather not
        support this use case; if you are merging with load=False, you're
        typically dealing with caching and the merged objects shouldn't be
        'dirty'.

        """
        mapper(User, users, properties={
            'addresses':relationship(mapper(Address, addresses))
        })
        sess = create_session()
        u = User()
        u.id = 7
        u.name = "fred"
        a1 = Address()
        a1.email_address='*****@*****.**'
        u.addresses.append(a1)

        sess.add(u)
        sess.flush()

        sess2 = create_session()
        u2 = sess2.query(User).options(sa.orm.joinedload('addresses')).get(7)

        sess3 = create_session()
        u3 = sess3.merge(u2, load=False)
        def go():
            sess3.flush()
        self.assert_sql_count(testing.db, go, 0)
Example #13
    def test_many_to_one_cascade(self):
        mapper(Address, addresses, properties={
            'user':relationship(User)
        })
        mapper(User, users)

        u1 = User(id=1, name="u1")
        a1 =Address(id=1, email_address="a1", user=u1)
        u2 = User(id=2, name="u2")

        sess = create_session()
        sess.add_all([a1, u2])
        sess.flush()

        a1.user = u2

        sess2 = create_session()
        a2 = sess2.merge(a1)
        eq_(
            attributes.get_history(a2, 'user'), 
            ([u2], (), [attributes.PASSIVE_NO_RESULT])
        )
        assert a2 in sess2.dirty

        sess.refresh(a1)

        sess2 = create_session()
        a2 = sess2.merge(a1, load=False)
        eq_(
            attributes.get_history(a2, 'user'), 
            ((), [u1], ())
        )
        assert a2 not in sess2.dirty
Example #14
    def test_class_deferred_cols(self):
        addresses, users = (self.tables.addresses,
                                self.tables.users)

        mapper(User, users, properties={
            'name': sa.orm.deferred(users.c.name),
            'addresses': relationship(Address, backref="user")
        })
        mapper(Address, addresses, properties={
            'email_address': sa.orm.deferred(addresses.c.email_address)
        })
        sess = create_session()
        u1 = User(name='ed')
        u1.addresses.append(Address(email_address='*****@*****.**'))
        sess.add(u1)
        sess.flush()
        sess.expunge_all()
        u1 = sess.query(User).get(u1.id)
        assert 'name' not in u1.__dict__
        assert 'addresses' not in u1.__dict__

        u2 = pickle.loads(pickle.dumps(u1))
        sess2 = create_session()
        sess2.add(u2)
        eq_(u2.name, 'ed')
        eq_(u2, User(name='ed', addresses=[Address(email_address='*****@*****.**')]))

        u2 = pickle.loads(pickle.dumps(u1))
        sess2 = create_session()
        u2 = sess2.merge(u2, load=False)
        eq_(u2.name, 'ed')
        eq_(u2, User(name='ed', addresses=[Address(email_address='*****@*****.**')]))
Example #15
    def test_no_load_sets_backrefs(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'addresses':relationship(mapper(Address, addresses),
                            backref='user')})

        sess = create_session()
        u = User()
        u.id = 7
        u.name = "fred"
        a1 = Address()
        a1.email_address='*****@*****.**'
        u.addresses.append(a1)

        sess.add(u)
        sess.flush()

        assert u.addresses[0].user is u

        sess2 = create_session()
        u2 = sess2.merge(u, load=False)
        assert not sess2.dirty
        def go():
            assert u2.addresses[0].user is u2
        self.assert_sql_count(testing.db, go, 0)

    def test_versioncheck(self):
        """query.with_lockmode performs a 'version check' on an already loaded instance"""

        s1 = create_session(autocommit=False)

        mapper(Foo, version_table, version_id_col=version_table.c.version_id)
        f1s1 = Foo(value='f1 value')
        s1.add(f1s1)
        s1.commit()

        s2 = create_session(autocommit=False)
        f1s2 = s2.query(Foo).get(f1s1.id)
        f1s2.value='f1 new value'
        s2.commit()

        # load, version is wrong
        assert_raises(
                sa.orm.exc.ConcurrentModificationError, 
                s1.query(Foo).with_lockmode('read').get, f1s1.id
            )

        # reload it - this expires the old version first
        s1.refresh(f1s1, lockmode='read')
        
        # now assert version OK
        s1.query(Foo).with_lockmode('read').get(f1s1.id)

        # assert brand new load is OK too
        s1.close()
        s1.query(Foo).with_lockmode('read').get(f1s1.id)
Example #17
    def test_bound_connection_transactional(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)
        c = testing.db.connect()

        sess = create_session(bind=c, autocommit=False)
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        sess.close()
        assert not c.in_transaction()
        assert c.scalar("select count(1) from users") == 0

        sess = create_session(bind=c, autocommit=False)
        u = User(name='u2')
        sess.add(u)
        sess.flush()
        sess.commit()
        assert not c.in_transaction()
        assert c.scalar("select count(1) from users") == 1
        c.execute("delete from users")
        assert c.scalar("select count(1) from users") == 0

        c = testing.db.connect()

        trans = c.begin()
        sess = create_session(bind=c, autocommit=True)
        u = User(name='u3')
        sess.add(u)
        sess.flush()
        assert c.in_transaction()
        trans.commit()
        assert not c.in_transaction()
        assert c.scalar("select count(1) from users") == 1
Example #18
    def test_basic(self):
        t2, t1 = self.tables.t2, self.tables.t1

        class T(object):
            pass

        class SubT(T):
            pass

        class T2(object):
            pass

        class SubT2(T2):
            pass

        mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity="t1")
        mapper(
            SubT,
            None,
            inherits=T,
            polymorphic_identity="subt1",
            properties={"t2s": relationship(SubT2, lazy="subquery", backref=sa.orm.backref("subt", lazy="subquery"))},
        )
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity="t2")
        mapper(SubT2, None, inherits=T2, polymorphic_identity="subt2")

        # testing a particular endless loop condition in eager load setup
        create_session().query(SubT).all()

    def test_add_delete(self):
        User, Address, addresses, users = (self.classes.User,
                                self.classes.Address,
                                self.tables.addresses,
                                self.tables.users)


        s = create_session()
        mapper(User, users, properties={
            'addresses': relationship(Address, cascade="all, delete")
        })
        mapper(Address, addresses)

        user = User(name='u1')

        assert_raises_message(sa.exc.InvalidRequestError,
                              'is not persisted', s.delete, user)

        s.add(user)
        s.flush()
        user = s.query(User).one()
        s.expunge(user)
        assert user not in s

        # modify outside of session, assert changes remain/get saved
        user.name = "fred"
        s.add(user)
        assert user in s
        assert user in s.dirty
        s.flush()
        s.expunge_all()
        assert s.query(User).count() == 1
        user = s.query(User).one()
        assert user.name == 'fred'

        # ensure it's not dirty if no changes occur
        s.expunge_all()
        assert user not in s
        s.add(user)
        assert user in s
        assert user not in s.dirty

        s2 = create_session()
        assert_raises_message(sa.exc.InvalidRequestError,
                              'is already attached to session',
                              s2.delete, user)
        u2 = s2.query(User).get(user.id)
        s2.expunge(u2)
        assert_raises_message(
            sa.exc.InvalidRequestError,
            'another instance .* is already present', s.delete, u2)
        s.expire(user)
        s.expunge(user)
        assert user not in s
        s.delete(user)
        assert user in s

        s.flush()
        assert user not in s
        assert s.query(User).count() == 0

    def test_limit(self):
        """Limit operations combined with lazy-load relationships."""

        users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users,
                                self.tables.items,
                                self.tables.order_items,
                                self.tables.orders,
                                self.classes.Item,
                                self.classes.User,
                                self.classes.Address,
                                self.classes.Order,
                                self.tables.addresses)


        mapper(Item, items)
        mapper(Order, orders, properties={
            'items':relationship(Item, secondary=order_items, lazy='subquery',
                order_by=items.c.id)
        })
        mapper(User, users, properties={
            'addresses':relationship(mapper(Address, addresses),
                            lazy='subquery',
                            order_by=addresses.c.id),
            'orders':relationship(Order, lazy='select', order_by=orders.c.id)
        })

        sess = create_session()
        q = sess.query(User)

        l = q.order_by(User.id).limit(2).offset(1).all()
        eq_(self.static.user_all_result[1:3], l)

        sess = create_session()
        l = q.order_by(sa.desc(User.id)).limit(2).offset(2).all()
        eq_(list(reversed(self.static.user_all_result[0:2])), l)
Example #21
    def test_no_load_with_backrefs(self):
        """load=False populates relationships in both directions without requiring a load"""
        mapper(User, users, properties={
            'addresses':relationship(mapper(Address, addresses), backref='user')
        })

        u = User(id=7, name='fred', addresses=[
            Address(email_address='ad1'),
            Address(email_address='ad2')])
        sess = create_session()
        sess.add(u)
        sess.flush()
        sess.close()
        assert 'user' in u.addresses[1].__dict__

        sess = create_session()
        u2 = sess.merge(u, load=False)
        assert 'user' in u2.addresses[1].__dict__
        eq_(u2.addresses[1].user, User(id=7, name='fred'))

        sess.expire(u2.addresses[1], ['user'])
        assert 'user' not in u2.addresses[1].__dict__
        sess.close()

        sess = create_session()
        u = sess.merge(u2, load=False)
        assert 'user' not in u.addresses[1].__dict__
        eq_(u.addresses[1].user, User(id=7, name='fred'))

    def test_no_load_disallows_dirty(self):
        """load=False doesnt support 'dirty' objects right now

        (see test_no_load_with_eager()). Therefore let's assert it.

        """

        users, User = self.tables.users, self.classes.User

        mapper(User, users)
        sess = create_session()
        u = User()
        u.id = 7
        u.name = "fred"
        sess.add(u)
        sess.flush()

        u.name = 'ed'
        sess2 = create_session()
        try:
            sess2.merge(u, load=False)
            assert False
        except sa.exc.InvalidRequestError as e:
            assert ("merge() with load=False option does not support "
                    "objects marked as 'dirty'.  flush() all changes on mapped "
                    "instances before merging with load=False.") in str(e)
Example #23
    def test_extension(self):
        mapper(User, users)
        log = []
        class MyExt(sa.orm.session.SessionExtension):
            def before_commit(self, session):
                log.append('before_commit')
            def after_commit(self, session):
                log.append('after_commit')
            def after_rollback(self, session):
                log.append('after_rollback')
            def before_flush(self, session, flush_context, objects):
                log.append('before_flush')
            def after_flush(self, session, flush_context):
                log.append('after_flush')
            def after_flush_postexec(self, session, flush_context):
                log.append('after_flush_postexec')
            def after_begin(self, session, transaction, connection):
                log.append('after_begin')
            def after_attach(self, session, instance):
                log.append('after_attach')
            def after_bulk_update(self, session, query, query_context, result):
                log.append('after_bulk_update')
            def after_bulk_delete(self, session, query, query_context, result):
                log.append('after_bulk_delete')

        sess = create_session(extension = MyExt())
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        assert log == ['after_attach', 'before_flush', 'after_begin', 'after_flush', 'before_commit', 'after_commit', 'after_flush_postexec']

        log = []
        sess = create_session(autocommit=False, extension=MyExt())
        u = User(name='u1')
        sess.add(u)
        sess.flush()
        assert log == ['after_attach', 'before_flush', 'after_begin', 'after_flush', 'after_flush_postexec']

        log = []
        u.name = 'ed'
        sess.commit()
        assert log == ['before_commit', 'before_flush', 'after_flush', 'after_flush_postexec', 'after_commit']

        log = []
        sess.commit()
        assert log == ['before_commit', 'after_commit']

        log = []
        sess.query(User).delete()
        assert log == ['after_begin', 'after_bulk_delete']

        log = []
        sess.query(User).update({'name': 'foo'})
        assert log == ['after_bulk_update']
        
        log = []
        sess = create_session(autocommit=False, extension=MyExt(), bind=testing.db)
        conn = sess.connection()
        assert log == ['after_begin']
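An aside, not part of the original test: SessionExtension was deprecated in
SQLAlchemy 0.7 in favor of the event API. A minimal sketch of the equivalent
hook registration (sketch only; log is the list from the test above):

    from sqlalchemy import event
    from sqlalchemy.orm import Session

    @event.listens_for(Session, 'before_commit')
    def on_before_commit(session):
        log.append('before_commit')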
Example #24
    def test_instance_deferred_cols(self):
        users, addresses = (self.tables.users, self.tables.addresses)

        mapper(
            User,
            users,
            properties={"addresses": relationship(Address, backref="user")},
        )
        mapper(Address, addresses)

        sess = create_session()
        u1 = User(name="ed")
        u1.addresses.append(Address(email_address="*****@*****.**"))
        sess.add(u1)
        sess.flush()
        sess.expunge_all()

        u1 = (
            sess.query(User)
            .options(
                sa.orm.defer("name"), sa.orm.defer("addresses.email_address")
            )
            .get(u1.id)
        )
        assert "name" not in u1.__dict__
        assert "addresses" not in u1.__dict__

        u2 = pickle.loads(pickle.dumps(u1))
        sess2 = create_session()
        sess2.add(u2)
        eq_(u2.name, "ed")
        assert "addresses" not in u2.__dict__
        ad = u2.addresses[0]
        assert "email_address" not in ad.__dict__
        eq_(ad.email_address, "*****@*****.**")
        eq_(
            u2,
            User(name="ed", addresses=[Address(email_address="*****@*****.**")]),
        )

        u2 = pickle.loads(pickle.dumps(u1))
        sess2 = create_session()
        u2 = sess2.merge(u2, load=False)
        eq_(u2.name, "ed")
        assert "addresses" not in u2.__dict__
        ad = u2.addresses[0]

        # mapper options now transmit over merge(),
        # new as of 0.6, so email_address is deferred.
        assert "email_address" not in ad.__dict__

        eq_(ad.email_address, "*****@*****.**")
        eq_(
            u2,
            User(name="ed", addresses=[Address(email_address="*****@*****.**")]),
        )

    def test_pickled_update(self):
        users, User = self.tables.users, pickleable.User

        mapper(User, users)
        sess1 = create_session()
        sess2 = create_session()
        u1 = User(name="u1")
        sess1.add(u1)
        assert_raises_message(sa.exc.InvalidRequestError, "already attached to session", sess2.add, u1)
        u2 = pickle.loads(pickle.dumps(u1))
        sess2.add(u2)
Example #26
    def test_save_update_delete(self):

        s = create_session()
        mapper(User, users, properties={
            'addresses':relation(Address, cascade="all, delete")
        })
        mapper(Address, addresses)

        user = User(name='u1')

        assert_raises_message(sa.exc.InvalidRequestError, "is not persisted", s.update, user)
        assert_raises_message(sa.exc.InvalidRequestError, "is not persisted", s.delete, user)

        s.add(user)
        s.flush()
        user = s.query(User).one()
        s.expunge(user)
        assert user not in s

        # modify outside of session, assert changes remain/get saved
        user.name = "fred"
        s.add(user)
        assert user in s
        assert user in s.dirty
        s.flush()
        s.expunge_all()
        assert s.query(User).count() == 1
        user = s.query(User).one()
        assert user.name == 'fred'

        # ensure it's not dirty if no changes occur
        s.expunge_all()
        assert user not in s
        s.add(user)
        assert user in s
        assert user not in s.dirty

        assert_raises_message(sa.exc.InvalidRequestError, "is already persistent", s.save, user)

        s2 = create_session()
        assert_raises_message(sa.exc.InvalidRequestError, "is already attached to session", s2.delete, user)

        u2 = s2.query(User).get(user.id)
        assert_raises_message(sa.exc.InvalidRequestError, "another instance with key", s.delete, u2)

        s.expire(user)
        s.expunge(user)
        assert user not in s
        s.delete(user)
        assert user in s
        
        s.flush()
        assert user not in s
        assert s.query(User).count() == 0
Example #27
 def test_pickled_update(self):
     mapper(User, users)
     sess1 = create_session()
     sess2 = create_session()
     u1 = User(name='u1')
     sess1.add(u1)
     assert_raises_message(sa.exc.InvalidRequestError,
                           'already attached to session', sess2.add,
                           u1)
     u2 = pickle.loads(pickle.dumps(u1))
     sess2.add(u2)
Example #28
 def test_transactions_isolated(self):
     mapper(User, users)
     users.delete().execute()
     
     s1 = create_session(bind=testing.db, autocommit=False)
     s2 = create_session(bind=testing.db, autocommit=False)
     u1 = User(name='u1')
     s1.add(u1)
     s1.flush()
     
     assert s2.query(User).all() == []

    def test_double(self):
        """tests lazy loading with two relationships simulatneously, from the same table, using aliases.  """

        users, orders, User, Address, Order, addresses = (self.tables.users,
                                self.tables.orders,
                                self.classes.User,
                                self.classes.Address,
                                self.classes.Order,
                                self.tables.addresses)


        openorders = sa.alias(orders, 'openorders')
        closedorders = sa.alias(orders, 'closedorders')

        mapper(Address, addresses)

        mapper(Order, orders)

        open_mapper = mapper(Order, openorders, non_primary=True)
        closed_mapper = mapper(Order, closedorders, non_primary=True)
        mapper(User, users, properties=dict(
            addresses=relationship(Address, lazy=True),
            open_orders=relationship(
                open_mapper,
                primaryjoin=sa.and_(openorders.c.isopen == 1,
                                    users.c.id == openorders.c.user_id),
                lazy='select'),
            closed_orders=relationship(
                closed_mapper,
                primaryjoin=sa.and_(closedorders.c.isopen == 0,
                                    users.c.id == closedorders.c.user_id),
                lazy='select')
        ))
        q = create_session().query(User)

        assert [
            User(
                id=7,
                addresses=[Address(id=1)],
                open_orders = [Order(id=3)],
                closed_orders = [Order(id=1), Order(id=5)]
            ),
            User(
                id=8,
                addresses=[Address(id=2), Address(id=3), Address(id=4)],
                open_orders = [],
                closed_orders = []
            ),
            User(
                id=9,
                addresses=[Address(id=5)],
                open_orders = [Order(id=4)],
                closed_orders = [Order(id=2)]
            ),
            User(id=10)

        ] == q.all()

        sess = create_session()
        user = sess.query(User).get(7)
        assert [Order(id=1), Order(id=5)] == create_session().query(closed_mapper).with_parent(user, property='closed_orders').all()
        assert [Order(id=3)] == create_session().query(open_mapper).with_parent(user, property='open_orders').all()
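An aside, not part of the original test: non_primary mappers such as
open_mapper and closed_mapper above were deprecated in SQLAlchemy 1.3 in favor
of aliased() entities. A hedged sketch of the query side under the newer API:

    from sqlalchemy.orm import aliased

    open_orders_alias = aliased(Order, openorders)
    closed_orders_alias = aliased(Order, closedorders)
    create_session().query(closed_orders_alias).all()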

    def test_noversioncheck(self):
        """test query.with_lockmode works when the mapper has no version id col"""
        s1 = create_session(autocommit=False)
        mapper(Foo, version_table)
        f1s1 = Foo(value="foo", version_id=0)
        s1.add(f1s1)
        s1.commit()

        s2 = create_session(autocommit=False)
        f1s2 = s2.query(Foo).with_lockmode('read').get(f1s1.id)
        assert f1s2.id == f1s1.id
        assert f1s2.value == f1s1.value

 def zwroc_poID(ID_Klienta):
     session = create_session(bind=engine)
     q = session.query(Klienci).filter(Klienci.ID_Klienta == ID_Klienta)
     rekord = q.one()
     return rekord
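This helper and the similar usun/odczyt helpers further down assume a
module-level engine plus declaratively mapped classes (Klienci, Sala, Zajecia,
Trenerzy, Sprzet). A minimal sketch of that assumed setup, with hypothetical
database URL and columns:

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base

    engine = create_engine('sqlite:///fitness.db')  # hypothetical URL
    Base = declarative_base()

    class Klienci(Base):
        __tablename__ = 'klienci'                   # hypothetical table name
        ID_Klienta = Column(Integer, primary_key=True)
        Imie = Column(String)                       # hypothetical column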
Example #32
def setup_db(database,
             table,
             home,
             create_sfsql_tables=True,
             create_plpythonu_functions=True,
             postgis_geometry_column='wkb_geometry',
             extra_columns=[],
             language='english'):
    """Setup database tables and indexes"""
    from sqlalchemy import Column, create_engine, Integer, MetaData, \
        Table, Text, Unicode
    from sqlalchemy.orm import create_session

    LOGGER.info('Creating database %s', database)
    if database.startswith('sqlite'):
        dbtype, filepath = database.split('sqlite:///')
        dirname = os.path.dirname(filepath)
        if not os.path.exists(dirname):
            raise RuntimeError('SQLite directory %s does not exist' % dirname)

    dbase = create_engine(database)

    schema_name, table_name = table.rpartition(".")[::2]

    mdata = MetaData(dbase, schema=schema_name or None)
    create_postgis_geometry = False

    # If PostGIS 2.x detected, do not create sfsql tables.
    if dbase.name == 'postgresql':
        try:
            dbsession = create_session(dbase)
            for row in dbsession.execute('select(postgis_lib_version())'):
                postgis_lib_version = row[0]
            create_sfsql_tables = False
            create_postgis_geometry = True
            LOGGER.info('PostGIS %s detected: Skipping SFSQL tables creation',
                        postgis_lib_version)
        except Exception:  # PostGIS not detected; fall back to defaults
            pass

    if create_sfsql_tables:
        LOGGER.info('Creating table spatial_ref_sys')
        srs = Table('spatial_ref_sys', mdata,
                    Column('srid', Integer, nullable=False, primary_key=True),
                    Column('auth_name', Text), Column('auth_srid', Integer),
                    Column('srtext', Text))
        srs.create()

        i = srs.insert()
        i.execute(
            srid=4326,
            auth_name='EPSG',
            auth_srid=4326,
            srtext=
            'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]'
        )

        LOGGER.info('Creating table geometry_columns')
        geom = Table(
            'geometry_columns',
            mdata,
            Column('f_table_catalog', Text, nullable=False),
            Column('f_table_schema', Text, nullable=False),
            Column('f_table_name', Text, nullable=False),
            Column('f_geometry_column', Text, nullable=False),
            Column('geometry_type', Integer),
            Column('coord_dimension', Integer),
            Column('srid', Integer, nullable=False),
            Column('geometry_format', Text, nullable=False),
        )
        geom.create()

        i = geom.insert()
        i.execute(f_table_catalog='public',
                  f_table_schema='public',
                  f_table_name=table_name,
                  f_geometry_column='wkt_geometry',
                  geometry_type=3,
                  coord_dimension=2,
                  srid=4326,
                  geometry_format='WKT')

    # abstract metadata information model

    LOGGER.info('Creating table %s', table_name)
    records = Table(
        table_name,
        mdata,
        # core; nothing happens without these
        Column('identifier', Text, primary_key=True),
        Column('typename',
               Text,
               default='csw:Record',
               nullable=False,
               index=True),
        Column('schema',
               Text,
               default='http://www.opengis.net/cat/csw/2.0.2',
               nullable=False,
               index=True),
        Column('mdsource', Text, default='local', nullable=False, index=True),
        Column('insert_date', Text, nullable=False, index=True),
        Column('xml', Unicode, nullable=False),
        Column('anytext', Text, nullable=False),
        Column('metadata', Unicode),
        Column('metadata_type',
               Text,
               default='application/xml',
               nullable=False),
        Column('language', Text, index=True),

        # identification
        Column('type', Text, index=True),
        Column('title', Text, index=True),
        Column('title_alternate', Text, index=True),
        Column('abstract', Text, index=True),
        Column('edition', Text, index=True),
        Column('keywords', Text, index=True),
        Column('keywordstype', Text, index=True),
        Column('parentidentifier', Text, index=True),
        Column('relation', Text, index=True),
        Column('time_begin', Text, index=True),
        Column('time_end', Text, index=True),
        Column('topicategory', Text, index=True),
        Column('resourcelanguage', Text, index=True),

        # attribution
        Column('creator', Text, index=True),
        Column('publisher', Text, index=True),
        Column('contributor', Text, index=True),
        Column('organization', Text, index=True),

        # security
        Column('securityconstraints', Text, index=True),
        Column('accessconstraints', Text, index=True),
        Column('otherconstraints', Text, index=True),

        # date
        Column('date', Text, index=True),
        Column('date_revision', Text, index=True),
        Column('date_creation', Text, index=True),
        Column('date_publication', Text, index=True),
        Column('date_modified', Text, index=True),
        Column('format', Text, index=True),
        Column('source', Text, index=True),

        # geospatial
        Column('crs', Text, index=True),
        Column('geodescode', Text, index=True),
        Column('denominator', Text, index=True),
        Column('distancevalue', Text, index=True),
        Column('distanceuom', Text, index=True),
        Column('wkt_geometry', Text),

        # service
        Column('servicetype', Text, index=True),
        Column('servicetypeversion', Text, index=True),
        Column('operation', Text, index=True),
        Column('couplingtype', Text, index=True),
        Column('operateson', Text, index=True),
        Column('operatesonidentifier', Text, index=True),
        Column('operatesoname', Text, index=True),

        # inspire
        Column('degree', Text, index=True),
        Column('classification', Text, index=True),
        Column('conditionapplyingtoaccessanduse', Text, index=True),
        Column('lineage', Text, index=True),
        Column('responsiblepartyrole', Text, index=True),
        Column('specificationtitle', Text, index=True),
        Column('specificationdate', Text, index=True),
        Column('specificationdatetype', Text, index=True),

        # eo
        Column('platform', Text, index=True),
        Column('instrument', Text, index=True),
        Column('sensortype', Text, index=True),
        Column('cloudcover', Text, index=True),
        # bands: JSON list of dicts with properties: name, units, min, max
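        # (hypothetical sample value:
        #  '[{"name": "B4", "units": "nm", "min": 645, "max": 683}]')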
        Column('bands', Text, index=True),

        # distribution
        # links: JSON list of dicts with properties: name, description, protocol, url
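        # (hypothetical sample value: '[{"name": "download", "description":
        #  "data file", "protocol": "WWW:LINK", "url":
        #  "https://example.org/data.zip"}]')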
        Column('links', Text, index=True),
    )

    # add extra columns that may have been passed via extra_columns
    # extra_columns is a list of sqlalchemy.Column objects
    if extra_columns:
        LOGGER.info('Extra column definitions detected')
        for extra_column in extra_columns:
            LOGGER.info('Adding extra column: %s', extra_column)
            records.append_column(extra_column)

    records.create()

    conn = dbase.connect()

    if create_plpythonu_functions and not create_postgis_geometry:
        if dbase.name == 'postgresql':  # create plpythonu functions within db
            LOGGER.info('Setting plpythonu functions')
            pycsw_home = home
            function_get_anytext = '''
        CREATE OR REPLACE FUNCTION get_anytext(xml text)
        RETURNS text
        AS $$
            import sys
            sys.path.append('%s')
            from pycsw.core import util
            return util.get_anytext(xml)
            $$ LANGUAGE plpythonu;
        ''' % pycsw_home
            function_query_spatial = '''
        CREATE OR REPLACE FUNCTION query_spatial(bbox_data_wkt text, bbox_input_wkt text, predicate text, distance text)
        RETURNS text
        AS $$
            import sys
            sys.path.append('%s')
            from pycsw.core import repository
            return repository.query_spatial(bbox_data_wkt, bbox_input_wkt, predicate, distance)
            $$ LANGUAGE plpythonu;
        ''' % pycsw_home
            function_update_xpath = '''
        CREATE OR REPLACE FUNCTION update_xpath(nsmap text, xml text, recprops text)
        RETURNS text
        AS $$
            import sys
            sys.path.append('%s')
            from pycsw.core import repository
            return repository.update_xpath(nsmap, xml, recprops)
            $$ LANGUAGE plpythonu;
        ''' % pycsw_home
            function_get_geometry_area = '''
        CREATE OR REPLACE FUNCTION get_geometry_area(geom text)
        RETURNS text
        AS $$
            import sys
            sys.path.append('%s')
            from pycsw.core import repository
            return repository.get_geometry_area(geom)
            $$ LANGUAGE plpythonu;
        ''' % pycsw_home
            function_get_spatial_overlay_rank = '''
        CREATE OR REPLACE FUNCTION get_spatial_overlay_rank(target_geom text, query_geom text)
        RETURNS text
        AS $$
            import sys
            sys.path.append('%s')
            from pycsw.core import repository
            return repository.get_spatial_overlay_rank(target_geom, query_geom)
            $$ LANGUAGE plpythonu;
        ''' % pycsw_home
            conn.execute(function_get_anytext)
            conn.execute(function_query_spatial)
            conn.execute(function_update_xpath)
            conn.execute(function_get_geometry_area)
            conn.execute(function_get_spatial_overlay_rank)

    if dbase.name == 'postgresql':
        LOGGER.info('Creating PostgreSQL Free Text Search (FTS) GIN index')
        tsvector_fts = "alter table %s add column anytext_tsvector tsvector" % table_name
        conn.execute(tsvector_fts)
        index_fts = "create index fts_gin_idx on %s using gin(anytext_tsvector)" % table_name
        conn.execute(index_fts)
        # This needs to run if records exist "UPDATE records SET anytext_tsvector = to_tsvector('english', anytext)"
        trigger_fts = "create trigger ftsupdate before insert or update on %s for each row execute procedure tsvector_update_trigger('anytext_tsvector', 'pg_catalog.%s', 'anytext')" % (
            table_name, language)
        conn.execute(trigger_fts)

    if dbase.name == 'postgresql' and create_postgis_geometry:
        # create native geometry column within db
        LOGGER.info('Creating native PostGIS geometry column')
        if postgis_lib_version < '2':
            create_column_sql = "SELECT AddGeometryColumn('%s', '%s', 4326, 'POLYGON', 2)" % (
                table_name, postgis_geometry_column)
        else:
            create_column_sql = "ALTER TABLE %s ADD COLUMN %s geometry(Geometry,4326);" % (
                table_name, postgis_geometry_column)
        create_insert_update_trigger_sql = '''
DROP TRIGGER IF EXISTS %(table)s_update_geometry ON %(table)s;
DROP FUNCTION IF EXISTS %(table)s_update_geometry();
CREATE FUNCTION %(table)s_update_geometry() RETURNS trigger AS $%(table)s_update_geometry$
BEGIN
    IF NEW.wkt_geometry IS NULL THEN
        RETURN NEW;
    END IF;
    NEW.%(geometry)s := ST_GeomFromText(NEW.wkt_geometry,4326);
    RETURN NEW;
END;
$%(table)s_update_geometry$ LANGUAGE plpgsql;

CREATE TRIGGER %(table)s_update_geometry BEFORE INSERT OR UPDATE ON %(table)s
FOR EACH ROW EXECUTE PROCEDURE %(table)s_update_geometry();
    ''' % {
            'table': table_name,
            'geometry': postgis_geometry_column
        }

        create_spatial_index_sql = 'CREATE INDEX %(geometry)s_idx ON %(table)s USING GIST (%(geometry)s);' \
        % {'table': table_name, 'geometry': postgis_geometry_column}

        conn.execute(create_column_sql)
        conn.execute(create_insert_update_trigger_sql)
        conn.execute(create_spatial_index_sql)
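A hedged usage sketch for setup_db above (database URL, table name, and pycsw
home path are hypothetical):

    setup_db('sqlite:////var/lib/pycsw/records.db', 'records', '/opt/pycsw')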

 def readfromdatabase():
     session = create_session(bind=engine)
     testlist = session.query(Klienci).all()
     session.close()
     return testlist
Example #34
def get_session():
    if not hasattr(connection, 'sa_session'):
        session = orm.create_session()
        session.bind = get_engine()
        connection.sa_session = session
    return connection.sa_session
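A hedged usage sketch (relies on the surrounding module's connection and
get_engine plumbing; MyModel is a hypothetical mapped class):

    session = get_session()
    rows = session.query(MyModel).all()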
Example #35
    def test_order_by(self):
        Foo = self.classes.Foo

        query = create_session().query(Foo)
        assert query.order_by(Foo.bar)[0].bar == 0
        assert query.order_by(sa.desc(Foo.bar))[0].bar == 99
Example #36
    def test_merge_empty_attributes(self):
        User, dingalings = self.classes.User, self.tables.dingalings

        mapper(User, dingalings)

        sess = create_session()

        # merge empty stuff.  goes in as NULL.
        # not sure what this was originally trying to
        # test.
        u1 = sess.merge(User(id=1))
        sess.flush()
        assert u1.data is None

        # save another user with "data"
        u2 = User(id=2, data="foo")
        sess.add(u2)
        sess.flush()

        # merge User on u2's pk with
        # no "data".
        # value isn't whacked from the destination
        # dict.
        u3 = sess.merge(User(id=2))
        eq_(u3.__dict__['data'], "foo")

        # make a change.
        u3.data = 'bar'

        # merge another no-"data" user.
        # attribute maintains modified state.
        # (usually autoflush would have happened
        # here anyway).
        u4 = sess.merge(User(id=2))
        eq_(u3.__dict__['data'], "bar")

        sess.flush()
        # and after the flush.
        eq_(u3.data, "bar")

        # new row.
        u5 = User(id=3, data="foo")
        sess.add(u5)
        sess.flush()

        # blow it away from u5, but don't
        # mark as expired.  so it would just
        # be blank.
        del u5.data

        # the merge adds expiry to the
        # attribute so that it loads.
        # not sure if I like this - it currently is needed
        # for test_pickled:PickleTest.test_instance_deferred_cols
        u6 = sess.merge(User(id=3))
        assert 'data' not in u6.__dict__
        assert u6.data == "foo"

        # set it to None.  this is actually
        # a change so gets preserved.
        u6.data = None
        u7 = sess.merge(User(id=3))
        assert u6.__dict__['data'] is None
Example #37
    def test_no_load_preserves_parents(self):
        """Merge with load=False does not trigger a 'delete-orphan'
        operation.

        merge with load=False sets attributes without using events.
        this means the 'hasparent' flag is not propagated to the newly
        merged instance. in fact this works out OK, because the
        '_state.parents' collection on the newly merged instance is
        empty; since the mapper doesn't see an active 'False' setting in
        this collection when _is_orphan() is called, it does not count
        as an orphan (i.e. this is the 'optimistic' logic in
        mapper._is_orphan().)

        """

        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User,
               users,
               properties={
                   'addresses':
                   relationship(mapper(Address, addresses),
                                backref='user',
                                cascade="all, delete-orphan")
               })
        sess = create_session()
        u = User()
        u.id = 7
        u.name = "fred"
        a1 = Address()
        a1.email_address = '*****@*****.**'
        u.addresses.append(a1)
        sess.add(u)
        sess.flush()

        assert u.addresses[0].user is u

        sess2 = create_session()
        u2 = sess2.merge(u, load=False)
        assert not sess2.dirty
        a2 = u2.addresses[0]
        a2.email_address = 'somenewaddress'
        assert not sa.orm.object_mapper(a2)._is_orphan(
            sa.orm.attributes.instance_state(a2))
        sess2.flush()
        sess2.expunge_all()

        eq_(
            sess2.query(User).get(u2.id).addresses[0].email_address,
            'somenewaddress')

        # this use case is not supported; this is with a pending Address
        # on the pre-merged object, and we currently don't support
        # 'dirty' objects being merged with load=False.  in this case,
        # the empty '_state.parents' collection would be an issue, since
        # the optimistic flag is False in _is_orphan() for pending
        # instances.  so if we start supporting 'dirty' with load=False,
        # this test will need to pass

        sess = create_session()
        u = sess.query(User).get(7)
        u.addresses.append(Address())
        sess2 = create_session()
        try:
            u2 = sess2.merge(u, load=False)
            assert False

            # if load=False is changed to support dirty objects, this code
            # needs to pass
            a2 = u2.addresses[0]
            a2.email_address = 'somenewaddress'
            assert not sa.orm.object_mapper(a2)._is_orphan(
                sa.orm.attributes.instance_state(a2))
            sess2.flush()
            sess2.expunge_all()
            eq_(
                sess2.query(User).get(u2.id).addresses[0].email_address,
                'somenewaddress')
        except sa.exc.InvalidRequestError as e:
            assert "load=False option does not support" in str(e)

 def usun(ID_Sali):
     session = create_session(bind=engine)
     session.query(Sala).filter(Sala.ID_Sali == ID_Sali).delete()
     session.close()

 def zwroc_poID(ID_Zajecia):
     session = create_session(bind=engine)
     q = session.query(Zajecia).filter(Zajecia.ID_Zajecia == ID_Zajecia)
     rekord = q.one()
     return rekord

 def usun(ID_Trenera):
     session = create_session(bind=engine)
     session.query(Trenerzy).filter(
         Trenerzy.ID_Trenera == ID_Trenera).delete()
     session.close()

 def zwroc_poID(ID_Sali):
     session = create_session(bind=engine)
     q = session.query(Sala).filter(Sala.ID_Sali == ID_Sali)
     rekord = q.one()
     return rekord

 def zwroc_poID(ID_Trenera):
     session = create_session(bind=engine)
     q = session.query(Trenerzy).filter(Trenerzy.ID_Trenera == ID_Trenera)
     rekord = q.one()
     return rekord
Example #43
    def __init__(self, database, context, app_root=None, table='records', repo_filter=None):
        ''' Initialize repository '''

        self.context = context
        self.filter = repo_filter
        self.fts = False

        # Don't use relative paths; this is a hack to get around
        # most WSGI restrictions...
        if (app_root and database.startswith('sqlite:///') and
            not database.startswith('sqlite:////')):
            database = database.replace('sqlite:///',
                       'sqlite:///%s%s' % (app_root, os.sep))

        self.engine = Repository.create_engine('%s' % database)

        base = declarative_base(bind=self.engine)

        LOGGER.debug('binding ORM to existing database')

        self.postgis_geometry_column = None

        schema, table = util.sniff_table(table)

        self.dataset = type('dataset', (base,),
                            dict(__tablename__=table,
                                 __table_args__={'autoload': True,
                                                 'schema': schema}))

        self.dbtype = self.engine.name

        self.session = create_session(self.engine)

        temp_dbtype = None

        if self.dbtype == 'postgresql':
            # check if PostgreSQL is enabled with PostGIS 1.x
            try:
                self.session.execute(select([func.postgis_version()]))
                temp_dbtype = 'postgresql+postgis+wkt'
                LOGGER.debug('PostgreSQL+PostGIS1+WKT detected')
            except Exception as err:
                LOGGER.debug('PostgreSQL+PostGIS1+WKT detection failed')

            # check if PostgreSQL is enabled with PostGIS 2.x
            try:
                self.session.execute('select(postgis_version())')
                temp_dbtype = 'postgresql+postgis+wkt'
                LOGGER.debug('PostgreSQL+PostGIS2+WKT detected')
            except Exception as err:
                LOGGER.debug('PostgreSQL+PostGIS2+WKT detection failed')

            # check if a native PostGIS geometry column exists
            try:
                result = self.session.execute("select f_geometry_column from geometry_columns where f_table_name = '%s' and f_geometry_column != 'wkt_geometry' limit 1;" % table)
                row = result.fetchone()
                self.postgis_geometry_column = str(row['f_geometry_column'])
                temp_dbtype = 'postgresql+postgis+native'
                LOGGER.debug('PostgreSQL+PostGIS+Native detected')
            except Exception as err:
                LOGGER.debug('PostgreSQL+PostGIS+Native not picked up: %s', str(err))

            # check if a native PostgreSQL FTS GIN index exists
            result = self.session.execute("select relname from pg_class where relname='fts_gin_idx'").scalar()
            self.fts = bool(result)
            LOGGER.debug('PostgreSQL FTS enabled: %r', self.fts)

        if temp_dbtype is not None:
            LOGGER.debug('%s support detected', temp_dbtype)
            self.dbtype = temp_dbtype

        if self.dbtype in ['sqlite', 'sqlite3']:  # load SQLite query bindings
            # <= 0.6 behaviour
            if not __version__ >= '0.7':
                self.connection = self.engine.raw_connection()
                self.connection.create_function(
                    'query_spatial', 4, util.query_spatial)
                self.connection.create_function(
                    'update_xpath', 3, util.update_xpath)
                self.connection.create_function(
                    'get_anytext', 1, util.get_anytext)
                self.connection.create_function(
                    'get_geometry_area', 1, util.get_geometry_area)
                self.connection.create_function(
                    'get_spatial_overlay_rank', 2, util.get_spatial_overlay_rank)

        LOGGER.debug('setting repository queryables')
        # generate core queryables db and obj bindings
        self.queryables = {}

        for tname in self.context.model['typenames']:
            for qname in self.context.model['typenames'][tname]['queryables']:
                self.queryables[qname] = {}

                for qkey, qvalue in \
                        self.context.model['typenames'][tname]['queryables'][qname].items():
                    self.queryables[qname][qkey] = qvalue

        # flatten all queryables
        # TODO smarter way of doing this
        self.queryables['_all'] = {}
        for qbl in self.queryables:
            self.queryables['_all'].update(self.queryables[qbl])

        self.queryables['_all'].update(self.context.md_core_model['mappings'])
Example #44
session = Session()
metadata = MetaData(dba)
locati = Table('locations', metadata, autoload=True)
estimate = Table('provider_estimate', metadata, autoload=True)


class Location(object):
    pass


class Estimate(object):
    pass


session = create_session()

complete_trip = []  # accumulator used by get_addr() and optimize() below


def get_addr(query):
    rs = query.execute()
    for row in rs:
        complete_trip.append(row[2])


def optimize(trip_name):
    s = locati.select(locati.c.trip_name == trip_name).order_by(
        locati.c.trip_order)
    get_addr(s)
    waypoints = complete_trip[:]
    del waypoints[0]
    del waypoints[-1]

 def usun(ID_Klienta):
     session = create_session(bind=engine)
     session.query(Klienci).filter(
         Klienci.ID_Klienta == ID_Klienta).delete()
     session.close()

 def usun(ID_Zajecia):
     session = create_session(bind=engine)
     session.query(Zajecia).filter(
         Zajecia.ID_Zajecia == ID_Zajecia).delete()
     session.close()
Example #47
    def test_transient_to_persistent_collection(self):
        User, Address, addresses, users = (self.classes.User,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users)

        mapper(User,
               users,
               properties={
                   'addresses':
                   relationship(Address,
                                backref='user',
                                collection_class=OrderedSet,
                                order_by=addresses.c.id,
                                cascade="all, delete-orphan")
               })
        mapper(Address, addresses)

        load = self.load_tracker(User)
        self.load_tracker(Address, load)

        u = User(id=7,
                 name='fred',
                 addresses=OrderedSet([
                     Address(id=1, email_address='fred1'),
                     Address(id=2, email_address='fred2'),
                 ]))
        sess = create_session()
        sess.add(u)
        sess.flush()
        sess.expunge_all()

        eq_(load.called, 0)

        u = User(id=7,
                 name='fred',
                 addresses=OrderedSet([
                     Address(id=3, email_address='fred3'),
                     Address(id=4, email_address='fred4'),
                 ]))

        u = sess.merge(u)

        # 1. merges User object.  updates into session.
        # 2.,3. merges Address ids 3 & 4, saves into session.
        # 4.,5. loads pre-existing elements in "addresses" collection,
        # marks as deleted, Address ids 1 and 2.
        eq_(load.called, 5)

        eq_(
            u,
            User(id=7,
                 name='fred',
                 addresses=OrderedSet([
                     Address(id=3, email_address='fred3'),
                     Address(id=4, email_address='fred4'),
                 ])))
        sess.flush()
        sess.expunge_all()
        eq_(
            sess.query(User).one(),
            User(id=7,
                 name='fred',
                 addresses=OrderedSet([
                     Address(id=3, email_address='fred3'),
                     Address(id=4, email_address='fred4'),
                 ])))
def odczyt():  # odczyt = "read": return all Sprzet (equipment) rows
    session = create_session(bind=engine)
    lista = session.query(Sprzet).all()
    session.close()
    return lista
Exemple #49
0
    def test_attribute_cascade(self):
        """Merge of a persistent entity with two child
        persistent entities."""

        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User,
               users,
               properties={
                   'addresses':
                   relationship(mapper(Address, addresses), backref='user')
               })
        load = self.load_tracker(User)
        self.load_tracker(Address, load)

        sess = create_session()

        # set up data and save
        u = User(id=7,
                 name='fred',
                 addresses=[
                     Address(email_address='*****@*****.**'),
                     Address(email_address='*****@*****.**')
                 ])
        sess.add(u)
        sess.flush()

        # assert data was saved
        sess2 = create_session()
        u2 = sess2.query(User).get(7)
        eq_(
            u2,
            User(id=7,
                 name='fred',
                 addresses=[
                     Address(email_address='*****@*****.**'),
                     Address(email_address='*****@*****.**')
                 ]))

        # make local changes to data
        u.name = 'fred2'
        u.addresses[1].email_address = '*****@*****.**'

        eq_(load.called, 3)

        # new session, merge modified data into session
        sess3 = create_session()
        u3 = sess3.merge(u)
        eq_(load.called, 6)

        # ensure local changes are pending
        eq_(
            u3,
            User(id=7,
                 name='fred2',
                 addresses=[
                     Address(email_address='*****@*****.**'),
                     Address(email_address='*****@*****.**')
                 ]))

        # save merged data
        sess3.flush()

        # assert modified/merged data was saved
        sess.expunge_all()
        u = sess.query(User).get(7)
        eq_(
            u,
            User(id=7,
                 name='fred2',
                 addresses=[
                     Address(email_address='*****@*****.**'),
                     Address(email_address='*****@*****.**')
                 ]))
        eq_(load.called, 9)

        # merge persistent object into another session
        sess4 = create_session()
        u = sess4.merge(u)
        assert len(u.addresses)
        for a in u.addresses:
            assert a.user is u

        def go():
            sess4.flush()

        # no changes; therefore flush should do nothing
        self.assert_sql_count(testing.db, go, 0)
        eq_(load.called, 12)

        # test with "dontload" merge
        sess5 = create_session()
        u = sess5.merge(u, load=False)
        assert len(u.addresses)
        for a in u.addresses:
            assert a.user is u

        def go():
            sess5.flush()

        # no changes; therefore flush should do nothing
        # but also, load=False wipes out any difference in committed state,
        # so no flush at all
        self.assert_sql_count(testing.db, go, 0)
        eq_(load.called, 15)

        sess4 = create_session()
        u = sess4.merge(u, load=False)
        # post merge change
        u.addresses[1].email_address = 'afafds'

        def go():
            sess4.flush()

        # afafds change flushes
        self.assert_sql_count(testing.db, go, 1)
        eq_(load.called, 18)

        sess5 = create_session()
        u2 = sess5.query(User).get(u.id)
        eq_(u2.name, 'fred2')
        eq_(u2.addresses[1].email_address, 'afafds')
        eq_(load.called, 21)
def zwroc_poID(ID_Sprzetu):  # "return by ID": fetch exactly one Sprzet row
    session = create_session(bind=engine)
    q = session.query(Sprzet).filter(Sprzet.ID_Sprzetu == ID_Sprzetu)
    rekord = q.one()
    session.close()
    return rekord
Exemple #51
0
    def test_limit(self):
        Foo = self.classes.Foo

        query = create_session().query(Foo)
        assert len(list(query.limit(10))) == 10
def usun(ID_Sprzetu):  # delete the Sprzet (equipment) row with the given ID
    session = create_session(bind=engine)
    session.query(Sprzet).filter(Sprzet.ID_Sprzetu == ID_Sprzetu).delete()
    session.close()
Exemple #53
0
    def test_aggregate_2(self):
        foo = self.tables.foo

        query = create_session().query(func.avg(foo.c.bar))
        avg = query.filter(foo.c.bar < 30).one()[0]
        eq_(float(round(avg, 1)), 14.5)
def generuj_raport(ID_Trenera):  # "generate report" for the given trainer ID
    session = create_session(bind=engine)
    wynik = session.execute('exec raport :val', {'val': ID_Trenera}).fetchall()
    session.close()
    return wynik
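
Passing a raw SQL string to Session.execute() works on older SQLAlchemy versions but was later deprecated; a sketch of the same stored-procedure call through an explicit text() construct (generuj_raport_text is a hypothetical variant of the helper above):

from sqlalchemy import text

def generuj_raport_text(ID_Trenera):
    session = create_session(bind=engine)
    wynik = session.execute(
        text('exec raport :val'), {'val': ID_Trenera}).fetchall()
    session.close()
    return wynik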
Exemple #55
0
    def test_offset(self):
        Foo = self.classes.Foo

        query = create_session().query(Foo)
        assert list(query.order_by(Foo.bar).offset(10))[0].bar == 10
Exemple #56
0
    def test_aggregate_1(self):
        foo = self.tables.foo

        query = create_session().query(func.sum(foo.c.bar))
        assert query.filter(foo.c.bar < 30).one() == (435, )
Exemple #57
0
    def test_selectby(self):
        Foo = self.classes.Foo

        res = create_session().query(Foo).filter_by(range=5)
        assert res.order_by(Foo.bar)[0].bar == 5
        assert res.order_by(sa.desc(Foo.bar))[0].bar == 95
Exemple #58
0
def query3():
    session = create_session(testing.db)
    return session.query(User).first().addresses.all()
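
Calling .all() on User.addresses only works when the relationship is a dynamic loader; a minimal mapping sketch under that assumption (classical mapper style, as elsewhere in these examples):

# Sketch: 'addresses' must be configured with lazy='dynamic' so the
# attribute is a query-like object exposing .all()
mapper(User, users, properties={
    'addresses': relationship(Address, lazy='dynamic'),
})
mapper(Address, addresses)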
import pandas as pd
import datetime
from sqlalchemy import Table, Column, MetaData, create_engine, DateTime, Float
from sqlalchemy.orm import mapper, create_session


class Prices(object):
    # note: __tablename__ has no effect with the classical mapper() used
    # below; the table name comes from the Table('Prices', ...) definition
    __tablename__ = 'prices'


res = pd.read_excel('./assets_prices.xlsx', index_col=0)
cols = list(res.columns)
e = create_engine(
    'postgresql+psycopg2://postgres:[email protected]:5432/robo_advisor')
metadata = MetaData(bind=e)
t = Table(
    'Prices', metadata,
    Column('dt', DateTime, default=datetime.datetime.now, primary_key=True),
    *(Column(col, Float) for col in cols))
metadata.create_all()
mapper(Prices, t)
session = create_session(bind=e, autocommit=False, autoflush=True)
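
The snippet stops after creating the session and never loads the spreadsheet rows; a minimal continuation sketch, assuming the index of res carries the datetimes for the 'dt' primary key:

# Sketch: load each DataFrame row into the mapped Prices table
for dt, row in res.iterrows():
    p = Prices()
    p.dt = dt
    for col in cols:
        setattr(p, col, float(row[col]))  # column names come from the sheet
    session.add(p)
session.commit()  # session was created with autocommit=False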
    def test_roundtrip(self):
        mapper(Publication, self.tables.publication)

        mapper(
            Issue,
            self.tables.issue,
            properties={
                "publication": relationship(
                    Publication,
                    backref=backref("issues", cascade="all, delete-orphan"),
                )
            },
        )

        mapper(LocationName, self.tables.location_name)

        mapper(
            Location,
            self.tables.location,
            properties={
                "issue": relationship(
                    Issue,
                    backref=backref(
                        "locations",
                        lazy="joined",
                        cascade="all, delete-orphan",
                    ),
                ),
                "_name": relationship(LocationName),
            },
        )

        mapper(PageSize, self.tables.page_size)

        mapper(
            Magazine,
            self.tables.magazine,
            properties={
                "location": relationship(
                    Location, backref=backref("magazine", uselist=False)
                ),
                "size": relationship(PageSize),
            },
        )

        # use_unions / use_joins come from the surrounding test module and
        # select which with_polymorphic strategy to build
        if use_unions:
            page_join = polymorphic_union(
                {
                    "m": self.tables.page.join(self.tables.magazine_page),
                    "c": self.tables.page.join(self.tables.magazine_page).join(
                        self.tables.classified_page
                    ),
                    "p": self.tables.page.select(
                        self.tables.page.c.type == "p"
                    ),
                },
                None,
                "page_join",
            )
            page_mapper = mapper(
                Page,
                self.tables.page,
                with_polymorphic=("*", page_join),
                polymorphic_on=page_join.c.type,
                polymorphic_identity="p",
            )
        elif use_joins:
            page_join = self.tables.page.outerjoin(
                self.tables.magazine_page
            ).outerjoin(self.tables.classified_page)
            page_mapper = mapper(
                Page,
                self.tables.page,
                with_polymorphic=("*", page_join),
                polymorphic_on=self.tables.page.c.type,
                polymorphic_identity="p",
            )
        else:
            page_mapper = mapper(
                Page,
                self.tables.page,
                polymorphic_on=self.tables.page.c.type,
                polymorphic_identity="p",
            )

        if use_unions:
            magazine_join = polymorphic_union(
                {
                    "m": self.tables.page.join(self.tables.magazine_page),
                    "c": self.tables.page.join(self.tables.magazine_page).join(
                        self.tables.classified_page
                    ),
                },
                None,
                "page_join",
            )
            magazine_page_mapper = mapper(
                MagazinePage,
                self.tables.magazine_page,
                with_polymorphic=("*", magazine_join),
                inherits=page_mapper,
                polymorphic_identity="m",
                properties={
                    "magazine": relationship(
                        Magazine,
                        backref=backref(
                            "pages", order_by=magazine_join.c.page_no
                        ),
                    )
                },
            )
        elif use_joins:
            magazine_join = self.tables.page.join(
                self.tables.magazine_page
            ).outerjoin(self.tables.classified_page)
            magazine_page_mapper = mapper(
                MagazinePage,
                self.tables.magazine_page,
                with_polymorphic=("*", magazine_join),
                inherits=page_mapper,
                polymorphic_identity="m",
                properties={
                    "magazine": relationship(
                        Magazine,
                        backref=backref(
                            "pages", order_by=self.tables.page.c.page_no
                        ),
                    )
                },
            )
        else:
            magazine_page_mapper = mapper(
                MagazinePage,
                self.tables.magazine_page,
                inherits=page_mapper,
                polymorphic_identity="m",
                properties={
                    "magazine": relationship(
                        Magazine,
                        backref=backref(
                            "pages", order_by=self.tables.page.c.page_no
                        ),
                    )
                },
            )

        mapper(
            ClassifiedPage,
            self.tables.classified_page,
            inherits=magazine_page_mapper,
            polymorphic_identity="c",
            primary_key=[self.tables.page.c.id],
        )

        session = create_session()

        pub = Publication(name="Test")
        issue = Issue(issue=46, publication=pub)
        location = Location(ref="ABC", name="London", issue=issue)

        page_size = PageSize(name="A4", width=210, height=297)

        magazine = Magazine(location=location, size=page_size)

        page = ClassifiedPage(magazine=magazine, page_no=1)
        page2 = MagazinePage(magazine=magazine, page_no=2)
        page3 = ClassifiedPage(magazine=magazine, page_no=3)
        session.add(pub)

        session.flush()
        print([x for x in session])
        session.expunge_all()

        session.flush()
        session.expunge_all()
        p = session.query(Publication).filter(Publication.name == "Test").one()

        print(p.issues[0].locations[0].magazine.pages)
        print([page, page2, page3])
        assert repr(p.issues[0].locations[0].magazine.pages) == repr(
            [page, page2, page3]
        ), repr(p.issues[0].locations[0].magazine.pages)