def _fixture(
            self,
            dialect_name, exception, num_disconnects, is_disconnect=True):
        engine = self.engine

        event.listen(engine, "engine_connect", engines._connect_ping_listener)

        real_do_execute = engine.dialect.do_execute
        counter = itertools.count(1)

        def fake_do_execute(self, *arg, **kw):
            if next(counter) > num_disconnects:
                return real_do_execute(self, *arg, **kw)
            else:
                raise exception

        with self._dbapi_fixture(dialect_name):
            with test_utils.nested(
                mock.patch.object(engine.dialect,
                                  "do_execute",
                                  fake_do_execute),
                mock.patch.object(engine.dialect,
                                  "is_disconnect",
                                  mock.Mock(return_value=is_disconnect))
            ):
                yield
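A hedged usage sketch for the fixture above, assuming the test harness wraps it in contextlib.contextmanager; the dialect name and DBAPI exception class are stand-ins:

        # one simulated disconnect: the first execute raises, is classified
        # as a disconnect (is_disconnect=True), the pool is invalidated, and
        # the ping listener lets the statement succeed on retry
        with self._fixture("mysql", FakeDBAPIError("server has gone away"), 1):
            conn = self.engine.connect()
            conn.execute("SELECT 1")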
Example #2
 def register_events(self):
     event.listen(db.session, 'before_flush', self.before_flush)
     event.listen(db.session, 'after_flush', self.after_flush)
     event.listen(db.session, 'after_flush_postexec',
                  self.after_flush_postexec)
     event.listen(db.session, 'before_commit', self.before_commit)
     event.listen(db.session, 'after_commit', self.after_commit)
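The listeners above must match SQLAlchemy's SessionEvents signatures; a sketch of the handler shapes the class is assumed to define (bodies are placeholders):

 def before_flush(self, session, flush_context, instances):
     pass

 def after_flush(self, session, flush_context):
     pass

 def after_flush_postexec(self, session, flush_context):
     pass

 def before_commit(self, session):
     pass

 def after_commit(self, session):
     pass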
Example #3
 def __declare_last__(cls):
     event.listen(cls, 'before_update', cls.validate_before_update)
     event.listen(cls, 'before_insert', cls.validate_before_insert)
     # event.listen(cls, 'before_delete', cls.validate_before_delete)
     event.listen(cls, 'after_insert', cls.add_to_search)
     event.listen(cls, 'after_update', cls.update_search_table)
     event.listen(cls, 'after_delete', cls.delete_from_search)
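__declare_last__ is a declarative hook that SQLAlchemy invokes once, after all mappers are configured, which makes it a safe place to attach per-class listeners. A minimal shell showing where the method above would live; Base and the validator/search methods are assumptions:

 class Article(Base):
     __tablename__ = 'article'
     id = Column(Integer, primary_key=True)

     @classmethod
     def __declare_last__(cls):
         event.listen(cls, 'before_insert', cls.validate_before_insert)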
Example #4
    def test_parent_instance_child_class_apply_after(self):
        l1 = Mock()
        l2 = Mock()

        event.listen(self.TargetElement, "event_one", l2)

        factory = self.TargetFactory()
        element = factory.create()

        element.run_event(1)

        event.listen(factory, "event_one", l1)

        element.run_event(2)
        element.run_event(3)

        # c1 gets no events due to _JoinedListener
        # fixing the "parent" at construction time.
        # this can be changed to be "live" at the cost
        # of performance.
        eq_(
            l1.mock_calls, []
        )
        eq_(
            l2.mock_calls,
            [call(element, 1), call(element, 2), call(element, 3)]
        )
Example #5
 def init_sqlalchemy(self, scheme, connection):
     try:
         import sqlalchemy
         from sqlalchemy import create_engine, MetaData
         from sqlalchemy.orm import scoped_session, sessionmaker
         from torweb.db import CacheQuery
         import _mysql_exceptions
         from sqlalchemy import event
         from sqlalchemy.exc import DisconnectionError
         def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
             try:
                 dbapi_conn.cursor().execute('select now()')
             except _mysql_exceptions.OperationalError:
                 raise DisconnectionError
         
         engine = create_engine(
             connection,
             convert_unicode=True,
             encoding="utf-8",
             pool_recycle=3600*7,
             #echo_pool=True,
             echo=False,
         )
         event.listen(engine, 'checkout', my_on_checkout)
         metadata = MetaData(bind=engine)
         session = scoped_session(sessionmaker(bind=engine, query_cls=CacheQuery))
         sqlalchemy_sessions = [session]
         DB_Session = sessionmaker(bind=engine)
         return {"metadata":metadata, "session":session, "sqlalchemy_sessions":sqlalchemy_sessions}
         #setattr(self.app, 'metadata', metadata)
         #setattr(self.app, scheme.get('sqlalchemy', 'session'), session)
         #setattr(self.app, 'sqlalchemy_sessions', sqlalchemy_sessions)
     except Exception as e:
         print(e)
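On SQLAlchemy 1.2 and later, the hand-rolled 'checkout' ping above can usually be replaced with the built-in pre-ping; a sketch, with the connection string assumed:

 engine = create_engine(
     connection,
     pool_recycle=3600 * 7,
     pool_pre_ping=True,  # issues a cheap ping on checkout, reconnects if needed
 )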
Example #6
 def _listener(self, engine, listener_func):
     try:
         event.listen(engine, 'before_execute', listener_func)
         yield
     finally:
         event.remove(engine, 'before_execute',
                      listener_func)
Example #7
 def register_signals(self, app):
     """Register the signals."""
     before_record_index.connect(inject_provisional_community)
     if app.config['COMMUNITIES_OAI_ENABLED']:
         listen(Community, 'after_insert', create_oaipmh_set)
         listen(Community, 'after_delete', destroy_oaipmh_set)
     inclusion_request_created.connect(new_request)
Example #8
File: sa.py Project: enfoundry/WebCore
 def setup(self):
     self.model.__dict__['engine'] = engine_from_config(self.config, prefix="%s.sqlalchemy." % (self.prefix, ))
     self.model.metadata.bind = self.model.engine
     
     if self.soup:
         from sqlalchemy.ext.sqlsoup import SqlSoup, Session
         self.model.__dict__['soup'] = SqlSoup(self.model.metadata)
         self._session = Session
     
     else:
         args = dict(
                 bind = self.model.engine,
                 autocommit = asbool(self.config.get('%s.autocommit' % (self.prefix, ), False)),
                 autoflush = asbool(self.config.get('%s.autoflush' % (self.prefix, ), True)),
                 twophase = asbool(self.config.get('%s.twophase' % (self.prefix, ), False)),
             )
         
         setup = getattr(self.model, 'setup', None)
         if hasattr(setup, '__call__'):
             args = setup(args)
         
         self._session = sessionmaker(**args)
     
     populate = getattr(self.model, 'populate', None)
     if hasattr(populate, '__call__'):
         for table in self.model.metadata.sorted_tables:
             event.listen(table, 'after_create', self.populate_table)
Example #9
    def listen(self, cls, prp, handler):
        if (cls, prp) not in self.handlers:
            self.handlers[(cls, prp)] = list()
            event.listen(class_mapper(cls)._props[prp],
                         'set', self.make_callback(cls, prp))

        self.handlers[(cls, prp)].append(handler)
Example #10
    def test_parent_instance_child_class_apply_after(self):
        l1 = Mock()
        l2 = Mock()

        event.listen(self.TargetElement, "event_one", l2)

        factory = self.TargetFactory()
        element = factory.create()

        element.run_event(1)

        event.listen(factory, "event_one", l1)

        element.run_event(2)
        element.run_event(3)

        # if _JoinedListener fixes .listeners
        # at construction time, then we don't get
        # the new listeners.
        #eq_(l1.mock_calls, [])

        # alternatively, if _JoinedListener shares the list
        # using a @property, then we get them, at the arguable
        # expense of the extra method call to access the .listeners
        # collection
        eq_(
            l1.mock_calls, [call(element, 2), call(element, 3)]
        )

        eq_(
            l2.mock_calls,
            [call(element, 1), call(element, 2), call(element, 3)]
        )
Example #11
    def test_all_events(self):
        canary = []
        def before_attach(obj, parent):
            canary.append("%s->%s" % (obj.__class__.__name__, parent.__class__.__name__))

        def after_attach(obj, parent):
            canary.append("%s->%s" % (obj.__class__.__name__, parent))

        event.listen(schema.SchemaItem, "before_parent_attach", before_attach)
        event.listen(schema.SchemaItem, "after_parent_attach", after_attach)

        m = MetaData()
        t1 = Table('t1', m, 
            Column('id', Integer, Sequence('foo_id'), primary_key=True),
            Column('bar', String, ForeignKey('t2.id'))
        )
        t2 = Table('t2', m,
            Column('id', Integer, primary_key=True),
        )

        eq_(
            canary,
            ['Sequence->Column', 'Sequence->id', 'ForeignKey->Column',
             'ForeignKey->bar', 'Table->MetaData',
             'PrimaryKeyConstraint->Table', 'PrimaryKeyConstraint->t1',
             'Column->Table', 'Column->t1', 'Column->Table',
             'Column->t1', 'ForeignKeyConstraint->Table',
             'ForeignKeyConstraint->t1', 'Table->MetaData(bind=None)',
             'Table->MetaData', 'PrimaryKeyConstraint->Table',
             'PrimaryKeyConstraint->t2', 'Column->Table', 'Column->t2',
             'Table->MetaData(bind=None)']
        )
Example #12
    def test_propagate(self):
        Target = self._fixture()

        m1 = Mock()

        t1 = Target()
        t2 = Target()

        event.listen(t1, "event_one", m1, propagate=True)
        event.listen(t1, "event_two", m1, propagate=False)

        t2.dispatch._update(t1.dispatch)

        t1.dispatch.event_one("t1e1x")
        t1.dispatch.event_two("t1e2x")
        t2.dispatch.event_one("t2e1x")
        t2.dispatch.event_two("t2e2x")

        event.remove(t1, "event_one", m1)
        event.remove(t1, "event_two", m1)

        t1.dispatch.event_one("t1e1y")
        t1.dispatch.event_two("t1e2y")
        t2.dispatch.event_one("t2e1y")
        t2.dispatch.event_two("t2e2y")

        eq_(m1.mock_calls,
                [call('t1e1x'), call('t1e2x'),
                call('t2e1x')])
Example #13
    def test_listener_collection_removed_cleanup(self):
        from sqlalchemy.event import registry

        Target = self._fixture()

        m1 = Mock()

        t1 = Target()

        event.listen(t1, "event_one", m1)

        key = (id(t1), "event_one", id(m1))

        assert key in registry._key_to_collection
        collection_ref = list(registry._key_to_collection[key])[0]
        assert collection_ref in registry._collection_to_key

        t1.dispatch.event_one("t1")

        del t1

        gc_collect()

        assert key not in registry._key_to_collection
        assert collection_ref not in registry._collection_to_key
Example #14
def handle_error(engine, listener):
    """Add a handle_error listener for the given :class:`.Engine`.

    This listener uses the SQLAlchemy
    :meth:`sqlalchemy.event.ConnectionEvents.handle_error`
    event, however augments the listener for pre-0.9.7 versions of SQLAlchemy
    in order to support safe re-raise of the exception.

    """
    if utils.sqla_100:
        event.listen(engine, "handle_error", listener)
        return

    assert isinstance(engine, Engine), \
        "engine argument must be an Engine instance, not a Connection"

    if not utils.sqla_097:
        _rework_handle_exception_for_events(engine)
        engine._oslo_handle_error_events.append(listener)

    _rework_connect_and_revalidate_for_events(engine)

    if utils.sqla_097:
        # ctx.engine added per
        # https://bitbucket.org/zzzeek/sqlalchemy/issue/3266/
        def wrap_listener(ctx):
            if isinstance(ctx, engine_base.ExceptionContextImpl):
                ctx.engine = ctx.connection.engine
            return listener(ctx)
        event.listen(engine, "handle_error", wrap_listener)
Example #15
    def test_instance(self):
        Target = self._fixture()

        class Foo(object):
            def __init__(self):
                self.mock = Mock()

            def evt(self, arg):
                self.mock(arg)

        f1 = Foo()
        f2 = Foo()

        event.listen(Target, "event_one", f1.evt)
        event.listen(Target, "event_one", f2.evt)

        t1 = Target()
        t1.dispatch.event_one("x")

        event.remove(Target, "event_one", f1.evt)

        t1.dispatch.event_one("y")

        eq_(f1.mock.mock_calls, [call("x")])
        eq_(f2.mock.mock_calls, [call("x"), call("y")])
Example #16
    def test_bool_clslevel(self):
        def listen_one(x, y):
            pass

        event.listen(self.Target, "event_one", listen_one)
        t = self.Target()
        assert t.dispatch.event_one
Example #17
 def register(self):
     event.listen(
         self.engine, 'before_cursor_execute', self.before_cursor_execute
     )
     event.listen(
         self.engine, 'after_cursor_execute', self.after_cursor_execute
     )
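The two handlers registered above are not shown; they presumably follow the standard cursor-execute timing recipe, roughly (assuming time is imported):

 def before_cursor_execute(self, conn, cursor, statement,
                           parameters, context, executemany):
     # stash the start time on the connection's info dict
     conn.info.setdefault('query_start_time', []).append(time.time())

 def after_cursor_execute(self, conn, cursor, statement,
                          parameters, context, executemany):
     total = time.time() - conn.info['query_start_time'].pop(-1)
     print("query took %.3fs: %s" % (total, statement))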
Example #18
    def __init__(self):

        if app.config['SQL_DRIVER'] == 'pymssql':
          engine = create_engine(r"mssql+pymssql://{0}:{1}@{2}:{3}/{4}".format(
                                          app.config['DATABASE_USER'],
                                          app.config['DATABASE_PASSWORD'],
                                          app.config['DATABASE_HOST'],
                                          app.config['DATABASE_PORT'],
                                          app.config['DATABASE_NAME']))

        else:
          quoted = urllib.quote_plus('DRIVER={FreeTDS};Server=%s;Database=%s;UID=%s;PWD=%s;TDS_Version=8.0;CHARSET=UTF8;Port=1433;'
                                          %(app.config['DATABASE_HOST'],
                                            app.config['DATABASE_NAME'],
                                            app.config['DATABASE_USER'],
                                            app.config['DATABASE_PASSWORD']))

          engine = create_engine('mssql+pyodbc:///?odbc_connect={}'.format(quoted), connect_args={'convert_unicode': True})

        # create a Session
        Session = sessionmaker(bind=engine)

        try:
            self.session = Session()

            # the listener must be a callable; calling session.execute()
            # inline would run the statement once and register its result,
            # so wrap the statement in a listener function instead
            def set_isolation_level(session, transaction):
                session.execute(
                    'SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED')

            event.listen(Session, "after_transaction_create",
                         set_isolation_level)

            Log.info('Connection opened')
        except:
            Log.info('Can\'t create database session')
            raise
Example #19
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    engine = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    if config.get_main_option('bdr').strip().lower() == 'true':
        def enable_bdr(connection, connection_record):
            with connection.cursor() as cursor:
                cursor.execute('SET LOCAL bdr.permit_ddl_locking = true')
        event.listen(engine, 'connect', enable_bdr)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #20
    def test_accounting_commit_fails_delete(self):
        User = self.classes.User
        sess = create_session(autocommit=True)

        fail = False

        def fail_fn(*arg, **kw):
            if fail:
                raise Exception("commit fails")

        event.listen(sess, "after_flush_postexec", fail_fn)
        u1 = User(name='ed')
        sess.add(u1)
        sess.flush()

        sess.delete(u1)
        fail = True
        assert_raises(
            Exception,
            sess.flush
        )
        fail = False

        assert u1 in sess
        assert u1 not in sess.deleted
        sess.delete(u1)
        sess.flush()
        assert u1 not in sess
        eq_(
            sess.query(User.name).order_by(User.name).all(),
            []
        )
Example #21
    def define_triggers_and_indexes(self, mapper, cls):
        columns = self.inspect_columns(cls)
        for column in columns:
            # We don't want sqlalchemy to know about this column so we add it
            # externally.
            table = cls.__table__

            column_name = '%s_%s' % (table.name, column.name)

            if column_name in self.processed_columns:
                continue

            # This indexes the tsvector column.
            event.listen(
                table,
                'after_create',
                self.search_index_ddl(column)
            )

            # This sets up the trigger that keeps the tsvector column up to
            # date.
            if column.type.columns:
                event.listen(
                    table,
                    'after_create',
                    self.search_trigger_ddl(column)
                )

            self.processed_columns.append(column_name)
Example #22
File: filedepot.py Project: Kotti/Kotti
def includeme(config: Configurator) -> None:
    """ Pyramid includeme hook.

    :param config: app config
    :type config: :class:`pyramid.config.Configurator`
    """

    config.add_tween(
        "kotti.filedepot.TweenFactory", over=tweens.MAIN, under=tweens.INGRESS
    )
    config.add_request_method(uploaded_file_response, name="uploaded_file_response")
    config.add_request_method(uploaded_file_url, name="uploaded_file_url")

    from kotti.events import objectevent_listeners
    from kotti.events import ObjectInsert
    from kotti.events import ObjectUpdate

    from sqlalchemy.event import listen
    from sqlalchemy.engine import Engine

    listen(Engine, "engine_connect", adjust_for_engine)

    configure_filedepot(config.get_settings())

    # Update file metadata on change of blob data
    objectevent_listeners[(ObjectInsert, DBStoredFile)].append(set_metadata)
    objectevent_listeners[(ObjectUpdate, DBStoredFile)].append(set_metadata)

    # depot's _SQLAMutationTracker._session_committed is executed on
    # after_commit, that's too late for DBFileStorage to interact with the
    # session
    event.listen(DBSession, "before_commit", _SQLAMutationTracker._session_committed)
Example #23
def file_column(column):
 def set_event_listener(target, value, oldvalue, initiator):
     # remove the previously stored file when the column is reassigned
     if oldvalue:
         full_path = FileProcess.fullPath(oldvalue)
         if os.path.isfile(full_path):
             os.unlink(full_path)
 alchemy_event.listen(column, 'set', set_event_listener)
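Attribute 'set' events attach to an instrumented class attribute, so a hypothetical caller passes the mapped column itself (the Attachment model is an assumption):

file_column(Attachment.path)  # removes the old file whenever .path is reassigned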
Example #24
def initialize_sql_test(engine):
    DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    Base.metadata.create_all(engine)
    listen(sqlalchemy.orm.mapper, 'before_insert', make_uuid)
    return [DBSession, Base]
Example #25
    def define_temp_tables(cls, metadata):
        # cheat a bit, we should fix this with some dialect-level
        # temp table fixture
        if testing.against("oracle"):
            kw = {
                'prefixes': ["GLOBAL TEMPORARY"],
                'oracle_on_commit': 'PRESERVE ROWS'
            }
        else:
            kw = {
                'prefixes': ["TEMPORARY"],
            }

        user_tmp = Table(
            "user_tmp", metadata,
            Column("id", sa.INT, primary_key=True),
            Column('name', sa.VARCHAR(50)),
            Column('foo', sa.INT),
            sa.UniqueConstraint('name', name='user_tmp_uq'),
            sa.Index("user_tmp_ix", "foo"),
            **kw
        )
        if testing.requires.view_reflection.enabled and \
                testing.requires.temporary_views.enabled:
            event.listen(
                user_tmp, "after_create",
                DDL("create temporary view user_tmp_v as "
                    "select * from user_tmp")
            )
            event.listen(
                user_tmp, "before_drop",
                DDL("drop view user_tmp_v")
            )
Example #26
    def test_transactional(self):
        canary = []
        def tracker(name):
            def go(conn, *args, **kw):
                canary.append(name)
            return go

        engine = engines.testing_engine()
        event.listen(engine, 'before_execute', tracker('execute'))
        event.listen(engine, 'before_cursor_execute', tracker('cursor_execute'))
        event.listen(engine, 'begin', tracker('begin'))
        event.listen(engine, 'commit', tracker('commit'))
        event.listen(engine, 'rollback', tracker('rollback'))

        conn = engine.connect()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.rollback()
        trans = conn.begin()
        conn.execute(select([1]))
        trans.commit()

        eq_(canary, [
            'begin', 'execute', 'cursor_execute', 'rollback',
            'begin', 'execute', 'cursor_execute', 'commit',
            ])
Example #27
    def test_transactional_advanced(self):
        canary = []
        def tracker(name):
            def go(*args, **kw):
                canary.append(name)
            return go

        engine = engines.testing_engine()
        for name in ['begin', 'savepoint',
                     'rollback_savepoint', 'release_savepoint',
                     'rollback', 'begin_twophase',
                     'prepare_twophase', 'commit_twophase']:
            event.listen(engine, name, tracker(name))

        conn = engine.connect()

        trans = conn.begin()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.rollback()
        trans2 = conn.begin_nested()
        conn.execute(select([1]))
        trans2.commit()
        trans.rollback()

        trans = conn.begin_twophase()
        conn.execute(select([1]))
        trans.prepare()
        trans.commit()

        eq_(canary, ['begin', 'savepoint',
                     'rollback_savepoint', 'savepoint', 'release_savepoint',
                     'rollback', 'begin_twophase',
                     'prepare_twophase', 'commit_twophase'])
Example #28
File: filedepot.py Project: Kotti/Kotti
    def __declare_last__(cls) -> None:
        """ Executed by SQLAlchemy as part of mapper configuration

        When the data changes, we want to reset the cursor position of target
        instance, to allow proper streaming of data.
        """
        event.listen(DBStoredFile.data, "set", handle_change_data)
Example #29
    def test_retval_flag(self):
        canary = []
        def tracker(name):
            def go(conn, *args, **kw):
                canary.append(name)
            return go

        def execute(conn, clauseelement, multiparams, params):
            canary.append('execute')
            return clauseelement, multiparams, params

        def cursor_execute(conn, cursor, statement, 
                        parameters, context, executemany):
            canary.append('cursor_execute')
            return statement, parameters

        engine = engines.testing_engine()

        assert_raises(
            tsa.exc.ArgumentError,
            event.listen, engine, "begin", tracker("begin"), retval=True
        )

        event.listen(engine, "before_execute", execute, retval=True)
        event.listen(engine, "before_cursor_execute", cursor_execute, retval=True)
        engine.execute(select([1]))
        eq_(
            canary, ['execute', 'cursor_execute']
        )
Example #30
        def test_before_update_m2o(self):
            """Expect normal many to one attribute load behavior
            (should not get committed value)
            from within public 'before_update' event"""
            sess = self._mapper_setup()

            Address, User = self.classes.Address, self.classes.User

            def before_update(mapper, connection, target):
                # if get committed is used to find target.user, then
                # it will be still be u1 instead of u2
                assert target.user.id == target.user_id == u2.id
            from sqlalchemy import event
            event.listen(Address, 'before_update', before_update)

            a1 = Address(email_address='a1')
            u1 = User(name='u1', addresses=[a1])
            sess.add(u1)

            u2 = User(name='u2')
            sess.add(u2)
            sess.commit()

            sess.expunge_all()
            # lookup an address and move it to the other user
            a1 = sess.query(Address).get(a1.id)

            # move address to another user's fk
            assert a1.user_id == u1.id
            a1.user_id = u2.id

            sess.flush()
Example #31
    def init_app(self, app):
        Service.init_app(self, app)

        if not self._listening:
            event.listen(Session, "after_flush", self.create_audit_entries)
            self._listening = True
Example #32
File: db.py Project: ourway/fearless
 
    @staticmethod
    def _record(mapper, target, operation):
        s = object_session(target)
        if isinstance(s, SignallingSession):
            pk = tuple(mapper.primary_key_from_instance(target))
            s._model_changes[pk] = (target, operation)


# Usage
 
# this must happen only once
_MapperSignalEvents(orm.mapper).register()
_SessionSignalEvents().register()



#engine = create_engine(DB, echo=False, convert_unicode=True)
engine = create_engine(DB, echo=False,
                       convert_unicode=True, pool_recycle=3600,
                       pool_size=256, max_overflow=128)


event.listen(engine, 'checkout', checkout_listener)

#engine = create_engine("postgresql+psycopg2://farsheed:rrferl@localhost:5432/fearless2")
#engine.raw_connection().connection.text_factory = str
#Session = mptt_sessionmaker(sessionmaker(bind=engine, expire_on_commit=False))
session_factory = sessionmaker(bind=engine)
Session = scoped_session(session_factory)
Example #33
class NdtPose(Base):
    __tablename__ = 'menu_item'

    id = Column(Integer, primary_key=True)
    file_id = Column(Integer, ForeignKey('files.id'))
    geom = Column(Geometry(geometry_type='POLYGON', management=True))

    @property
    def serialize(self):
        nt_NdtPose = namedtuple('nt_NdtPose', ['file_name'])
        print(ops.Ops().st_contains(0, 1))
        return nt_NdtPose(file_name=ops.Ops().get_filename_by_id(
            self.file_id), )


def initialize():
    conn = engine.connect()
    conn.execute(select([func.InitSpatialMetaData()]))
    conn.close()

    Files.__table__.create(engine)
    NdtPose.__table__.create(engine)


bagspath = os.environ['BAGSPATH']
dbname = 'sqlite:///' + bagspath + '/index.db'
engine = create_engine(dbname)
listen(engine, 'connect', load_spatialite)

if not os.path.exists(bagspath + '/index.db'):
    initialize()
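The load_spatialite 'connect' listener used above is not shown; it presumably follows the usual SpatiaLite loading recipe:

def load_spatialite(dbapi_conn, connection_record):
    # let the sqlite3 DBAPI connection load C extensions, then pull in
    # the SpatiaLite module so spatial SQL functions become available
    dbapi_conn.enable_load_extension(True)
    dbapi_conn.load_extension('mod_spatialite')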
Example #34
def _compare_tables(conn_table_names, metadata_table_names, object_filters,
                    inspector, metadata, diffs, autogen_context):

    default_schema = inspector.bind.dialect.default_schema_name

    # tables coming from the connection will not have "schema"
    # set if it matches default_schema_name; so we need a list
    # of table names from local metadata that also have "None" if schema
    # == default_schema_name.  Most setups will be like this anyway but
    # some are not (see #170)
    metadata_table_names_no_dflt_schema = OrderedSet([
        (schema if schema != default_schema else None, tname)
        for schema, tname in metadata_table_names
    ])

    # to adjust for the MetaData collection storing the tables either
    # as "schemaname.tablename" or just "tablename", create a new lookup
    # which will match the "non-default-schema" keys to the Table object.
    tname_to_table = dict(
        (no_dflt_schema,
         metadata.tables[sa_schema._get_table_key(tname, schema)])
        for no_dflt_schema, (schema, tname) in zip(
            metadata_table_names_no_dflt_schema, metadata_table_names))
    metadata_table_names = metadata_table_names_no_dflt_schema

    for s, tname in metadata_table_names.difference(conn_table_names):
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        if _run_filters(metadata_table, tname, "table", False, None,
                        object_filters):
            diffs.append(("add_table", metadata_table))
            log.info("Detected added table %r", name)
            _compare_indexes_and_uniques(s, tname, object_filters, None,
                                         metadata_table, diffs,
                                         autogen_context, inspector)

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = sa_schema._get_table_key(tname, s)
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)

        if not exists:
            event.listen(
                t, "column_reflect",
                autogen_context['context'].impl._compat_autogen_column_reflect(
                    inspector))
            inspector.reflecttable(t, None)
        if _run_filters(t, tname, "table", True, None, object_filters):
            diffs.append(("remove_table", t))
            log.info("Detected removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    existing_metadata = sa_schema.MetaData()
    conn_column_info = {}
    for s, tname in existing_tables:
        name = sa_schema._get_table_key(tname, s)
        exists = name in existing_metadata.tables
        t = sa_schema.Table(tname, existing_metadata, schema=s)
        if not exists:
            event.listen(
                t, "column_reflect",
                autogen_context['context'].impl._compat_autogen_column_reflect(
                    inspector))
            inspector.reflecttable(t, None)
        conn_column_info[(s, tname)] = t

    for s, tname in sorted(existing_tables, key=lambda x: (x[0] or '', x[1])):
        s = s or None
        name = '%s.%s' % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        conn_table = existing_metadata.tables[name]

        if _run_filters(metadata_table, tname, "table", False, conn_table,
                        object_filters):
            with _compare_columns(s, tname, object_filters, conn_table,
                                  metadata_table, diffs, autogen_context,
                                  inspector):
                _compare_indexes_and_uniques(s, tname, object_filters,
                                             conn_table, metadata_table, diffs,
                                             autogen_context, inspector)
                _compare_foreign_keys(s, tname, object_filters, conn_table,
                                      metadata_table, diffs, autogen_context,
                                      inspector)
Example #35
 def register(self):
     event.listen(self.engine, 'before_cursor_execute',
                  self.before_cursor_execute)
     event.listen(self.engine, 'after_cursor_execute',
                  self.after_cursor_execute)
Example #36
def make_searchable(mapper=sa.orm.mapper, manager=search_manager, options={}):
    manager.options.update(options)
    event.listen(mapper, 'instrument_class', manager.process_mapper)
    event.listen(mapper, 'after_configured', manager.attach_ddl_listeners)
Example #37
 def add_listener(self, args):
     self.listeners.append(args)
     event.listen(*args)
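Because the (target, identifier, fn) triples are recorded, a matching teardown can hand the same arguments to event.remove(); a sketch:

 def remove_all_listeners(self):
     for args in self.listeners:
         event.remove(*args)
     del self.listeners[:]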
Example #38
    #
    #     return self

    def get_article_owner_portal(self, **kwargs):
        return [art_port_div.division.portal for art_port_div in self.portal_article if kwargs][0]

    @staticmethod
    def update_article(company_id, article_id, **kwargs):
        db(ArticleCompany, company_id=company_id, id=article_id).update(kwargs)


def set_long_striped(mapper, connection, target):
    target.long_stripped = MLStripper().strip_tags(target.long)


event.listen(ArticlePortalDivision, 'before_update', set_long_striped)
event.listen(ArticlePortalDivision, 'before_insert', set_long_striped)
event.listen(ArticleCompany, 'before_update', set_long_striped)
event.listen(ArticleCompany, 'before_insert', set_long_striped)


class Article(Base, PRBase):
    __tablename__ = 'article'

    id = Column(TABLE_TYPES['id_profireader'], primary_key=True)
    author_user_id = Column(TABLE_TYPES['id_profireader'],
                            ForeignKey('user.id'), nullable=False)

    submitted_versions = relationship(ArticleCompany,
                                      primaryjoin="and_(Article.id==ArticleCompany.article_id, "
                                                  "ArticleCompany.company_id!=None)")
Example #39
def clearSearchPathCallback(dbapi_con, connection_record):
    '''... as the user, this effectively makes it blank.

    This callback function is called for every database connection.

    For the full details of this issue, see:
    http://groups.google.com/group/sqlalchemy/browse_thread/thread/88b5cc5c12246220

    dbapi_con - type: psycopg2._psycopg.connection
    connection_record - type: sqlalchemy.pool._ConnectionRecord
    '''
    cursor = dbapi_con.cursor()
    cursor.execute('SET search_path TO "$user",functions,public')
    dbapi_con.commit()


listen(Pool, 'connect', clearSearchPathCallback)


class DatabaseConnection(object):
    '''This class defines an object that makes a connection to a database.
       The "DatabaseConnection" object takes as its parameter the SQLAlchemy
       database connection string.

       This class is best called from another class that contains the
       actual connection information (so that it can be reused for different
       connections).

       This class implements the singleton design pattern. The first time the
       object is created, it *requires* a valid database connection string.
       Every time it is called via:
Example #40
 def _listener(self, engine, listener_func):
     try:
         event.listen(engine, 'before_execute', listener_func)
         yield
     finally:
         event.remove(engine, 'before_execute', listener_func)
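A hedged usage sketch, assuming the generator above is wrapped in contextlib.contextmanager; the listener observes only statements executed inside the with-block:

 with self._listener(engine, my_listener_func):  # my_listener_func assumed
     engine.execute(select([1]))
 # on exit the listener has been removed again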
Example #41
def _compare_tables(
    conn_table_names,
    metadata_table_names,
    inspector,
    upgrade_ops,
    autogen_context,
):

    default_schema = inspector.bind.dialect.default_schema_name

    # tables coming from the connection will not have "schema"
    # set if it matches default_schema_name; so we need a list
    # of table names from local metadata that also have "None" if schema
    # == default_schema_name.  Most setups will be like this anyway but
    # some are not (see #170)
    metadata_table_names_no_dflt_schema = OrderedSet([
        (schema if schema != default_schema else None, tname)
        for schema, tname in metadata_table_names
    ])

    # to adjust for the MetaData collection storing the tables either
    # as "schemaname.tablename" or just "tablename", create a new lookup
    # which will match the "non-default-schema" keys to the Table object.
    tname_to_table = dict((
        no_dflt_schema,
        autogen_context.table_key_to_table[sa_schema._get_table_key(
            tname, schema)],
    ) for no_dflt_schema, (schema, tname) in zip(
        metadata_table_names_no_dflt_schema, metadata_table_names))
    metadata_table_names = metadata_table_names_no_dflt_schema

    for s, tname in metadata_table_names.difference(conn_table_names):
        name = "%s.%s" % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        if autogen_context.run_filters(metadata_table, tname, "table", False,
                                       None):
            upgrade_ops.ops.append(
                ops.CreateTableOp.from_table(metadata_table))
            log.info("Detected added table %r", name)
            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)

            comparators.dispatch("table")(
                autogen_context,
                modify_table_ops,
                s,
                tname,
                None,
                metadata_table,
            )
            if not modify_table_ops.is_empty():
                upgrade_ops.ops.append(modify_table_ops)

    removal_metadata = sa_schema.MetaData()
    for s, tname in conn_table_names.difference(metadata_table_names):
        name = sa_schema._get_table_key(tname, s)
        exists = name in removal_metadata.tables
        t = sa_schema.Table(tname, removal_metadata, schema=s)

        if not exists:
            event.listen(
                t,
                "column_reflect",
                # fmt: off
                autogen_context.migration_context.impl.
                _compat_autogen_column_reflect(inspector),
                # fmt: on
            )
            inspector.reflecttable(t, None)
        if autogen_context.run_filters(t, tname, "table", True, None):

            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)

            comparators.dispatch("table")(autogen_context, modify_table_ops, s,
                                          tname, t, None)
            if not modify_table_ops.is_empty():
                upgrade_ops.ops.append(modify_table_ops)

            upgrade_ops.ops.append(ops.DropTableOp.from_table(t))
            log.info("Detected removed table %r", name)

    existing_tables = conn_table_names.intersection(metadata_table_names)

    existing_metadata = sa_schema.MetaData()
    conn_column_info = {}
    for s, tname in existing_tables:
        name = sa_schema._get_table_key(tname, s)
        exists = name in existing_metadata.tables
        t = sa_schema.Table(tname, existing_metadata, schema=s)
        if not exists:
            event.listen(
                t,
                "column_reflect",
                # fmt: off
                autogen_context.migration_context.impl.
                _compat_autogen_column_reflect(inspector),
                # fmt: on
            )
            inspector.reflecttable(t, None)
        conn_column_info[(s, tname)] = t

    for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])):
        s = s or None
        name = "%s.%s" % (s, tname) if s else tname
        metadata_table = tname_to_table[(s, tname)]
        conn_table = existing_metadata.tables[name]

        if autogen_context.run_filters(metadata_table, tname, "table", False,
                                       conn_table):

            modify_table_ops = ops.ModifyTableOps(tname, [], schema=s)
            with _compare_columns(
                    s,
                    tname,
                    conn_table,
                    metadata_table,
                    modify_table_ops,
                    autogen_context,
                    inspector,
            ):

                comparators.dispatch("table")(
                    autogen_context,
                    modify_table_ops,
                    s,
                    tname,
                    conn_table,
                    metadata_table,
                )

            if not modify_table_ops.is_empty():
                upgrade_ops.ops.append(modify_table_ops)
Example #42
 def __declare_last__(cls):
     event.listen(cls, "before_update", cls._updated_at)
Example #43
 def register(self):
     listen(SessionBase, 'before_commit', self.session_signal_before_commit)
     listen(SessionBase, 'after_commit', self.session_signal_after_commit)
     listen(SessionBase, 'after_rollback', self.session_signal_after_rollback)
Example #44
        title = 'Gallery title'
        slug = slugify(title, to_lower=True)
        ret[slug] = cls(title=title, slug=slug, content='Gallery', active=True)

        title = 'Events title'
        slug = slugify(title, to_lower=True)
        ret[slug] = cls(title=title, slug=slug, content='Events', active=True)

        title = 'Contact title'
        slug = slugify(title, to_lower=True)
        ret[slug] = cls(title=title, slug=slug, content='Contact', active=True)

        return ret


event.listen(ShortTextContentBlock, 'before_insert',
             update_timestamps_before_insert)
event.listen(ShortTextContentBlock, 'before_update',
             update_timestamps_before_update)

event.listen(ShortTextContentBlock, 'before_insert',
             update_confirmedat_before_save)
event.listen(ShortTextContentBlock, 'before_update',
             update_confirmedat_before_save)

event.listen(ShortTextContentBlock, 'before_insert', update_slug_before_save)
event.listen(ShortTextContentBlock, 'before_update', update_slug_before_save)


class RichTextContentBlock(SurrogatePK, Slugged, TimeStamped, Confirmable,
                           Model):
    __tablename__ = 'rich_text_content_block'
Example #45
def register(
    session,
    initial_state=STATUS_ACTIVE,
    transaction_manager=zope_transaction.manager,
    keep_session=False,
):
    """Register ZopeTransaction listener events on the
    given Session or Session factory/class.

    This function requires at least SQLAlchemy 0.7 and makes use
    of the newer sqlalchemy.event package in order to register event listeners
    on the given Session.

    The session argument here may be a Session class or subclass, a
    sessionmaker or scoped_session instance, or a specific Session instance.
    Event listening will be specific to the scope of the type of argument
    passed, including specificity to its subclass as well as its identity.

    """
    from sqlalchemy import event

    ext = ZopeTransactionEvents(
        initial_state=initial_state,
        transaction_manager=transaction_manager,
        keep_session=keep_session,
    )

    event.listen(session, "after_begin", ext.after_begin)
    event.listen(session, "after_attach", ext.after_attach)
    event.listen(session, "after_flush", ext.after_flush)
    event.listen(session, "after_bulk_update", ext.after_bulk_update)
    event.listen(session, "after_bulk_delete", ext.after_bulk_delete)
    event.listen(session, "before_commit", ext.before_commit)
Example #46
 def register(self):
     listen(self.mapper, 'after_delete', self.mapper_signal_after_delete)
     listen(self.mapper, 'after_insert', self.mapper_signal_after_insert)
     listen(self.mapper, 'after_update', self.mapper_signal_after_update)
Example #47
 def __declare_last__(cls):
     event.listen(cls, "before_update", cls._active_at)
     event.listen(cls, "before_update", cls._inactive_at)
Example #48
def setup_schema(Base):
    '''
    https://marshmallow-sqlalchemy.readthedocs.io/en/latest/recipes.html#automatically-generating-schemas-for-sqlalchemy-models
    '''

    # Create a function which incorporates the Base and session information
    def setup_schema_fn():
        for class_ in Base._decl_class_registry.values():
            if hasattr(class_, "__tablename__"):
                if class_.__name__.endswith("Schema"):
                    raise ModelConversionError(
                        "For safety, setup_schema can not be used when a "
                        "Model class ends with 'Schema'")

                class Meta(object):
                    model = class_
                    load_instance = True
                    include_fk = True

                schema_class_name = "%sSchema" % class_.__name__

                schema_class = type(schema_class_name,
                                    (SQLAlchemyAutoSchema, ), {"Meta": Meta})

                setattr(class_, "__schema__", schema_class)

    return setup_schema_fn


event.listen(mapper, "after_configured", setup_schema(base.Base))
Example #49
def generate_json():
    # TODO: Async database access
    engine = get_engine()
    # Prepare session
    Session = sessionmaker(bind=engine, autoflush=False)
    session = Session()

    # Count number of queries
    def query_counter(*_):
        query_counter.count += 1

    query_counter.count = 0
    event.listen(engine, "before_cursor_execute", query_counter)

    # Print number of employees
    total_number_of_employees = session.query(Bruger).count()
    print("Total employees:", total_number_of_employees)

    def filter_missing_entry(entry_map, entry_type, unit_uuid, entry):
        if unit_uuid not in entry_map:
            logger.error(entry_type + " not found in map: " + str(unit_uuid))
            return False
        return True

    def enrich_org_unit_with_x(org_unit_map, entry_type, entry_gen, entries):
        def gen_entry(x, bruger):
            return x.enhed_uuid, entry_gen(x, bruger)

        # Bind two arguments so the function only takes (unit_uuid, entry);
        # apply_tuple then lets it accept a single (unit_uuid, entry) tuple.
        missing_entry_filter = apply_tuple(
            partial(filter_missing_entry, org_unit_map,
                    entry_type.capitalize()))

        entries = starmap(gen_entry, entries)
        entries = filter(missing_entry_filter, entries)
        for unit_uuid, entry in entries:
            org_unit_map[unit_uuid][entry_type].append(entry)
        return org_unit_map

    def enrich_employees_with_x(employee_map, entry_type, entry_gen, entries):
        def gen_entry(x, enhed):
            return x.bruger_uuid, entry_gen(x, enhed)

        # Bind two arguments so the function only takes (unit_uuid, entry);
        # apply_tuple then lets it accept a single (unit_uuid, entry) tuple.
        missing_entry_filter = apply_tuple(
            partial(filter_missing_entry, employee_map,
                    entry_type.capitalize()))

        # Add org-units to queue as side-effect
        entries = side_effect(lambda x_enhed: add_org_unit(x_enhed[1]),
                              entries)
        entries = starmap(gen_entry, entries)
        entries = filter(missing_entry_filter, entries)
        for bruger_uuid, entry in entries:
            employee_map[bruger_uuid][entry_type].append(entry)
        return employee_map

    def enrich_org_units_with_engagements(org_unit_map):
        def gen_engagement(engagement, bruger):
            return {
                "title": engagement.stillingsbetegnelse_titel,
                "name": bruger.fornavn + " " + bruger.efternavn,
                "uuid": bruger.uuid,
            }

        engagements = session.query(
            Engagement,
            Bruger).filter(Engagement.bruger_uuid == Bruger.uuid).all()
        return enrich_org_unit_with_x(org_unit_map, "engagements",
                                      gen_engagement, engagements)

    def enrich_org_units_with_associations(org_unit_map):
        def gen_association(tilknytning, bruger):
            return {
                "title": tilknytning.tilknytningstype_titel,
                "name": bruger.fornavn + " " + bruger.efternavn,
                "uuid": bruger.uuid,
            }

        associations = session.query(
            Tilknytning,
            Bruger).filter(Tilknytning.bruger_uuid == Bruger.uuid).all()
        return enrich_org_unit_with_x(org_unit_map, "associations",
                                      gen_association, associations)

    def enrich_org_units_with_management(org_unit_map):
        def gen_management(leder, bruger):
            return {
                "title": leder.ledertype_titel,
                "name": bruger.fornavn + " " + bruger.efternavn,
                "uuid": bruger.uuid,
            }

        managements = session.query(
            Leder, Bruger).filter(Leder.bruger_uuid == Bruger.uuid).all()
        return enrich_org_unit_with_x(org_unit_map, "management",
                                      gen_management, managements)

    def enrich_org_units_with_kles(org_unit_map):
        def gen_kle(kle):
            return kle.enhed_uuid, {
                "title": kle.kle_nummer_titel,
                # "name": kle.kle_aspekt_titel,
                "uuid": kle.uuid,
            }

        # Bind two arguments so the function only takes (unit_uuid, entry);
        # apply_tuple then lets it accept a single (unit_uuid, entry) tuple.
        missing_entry_filter = apply_tuple(
            partial(filter_missing_entry, org_unit_map, "KLE"))

        kles = session.query(KLE).all()
        kles = filter(lambda kle: kle.kle_aspekt_titel == 'Udførende', kles)
        kles = map(gen_kle, kles)
        kles = filter(missing_entry_filter, kles)
        for unit_uuid, kle in kles:
            org_unit_map[unit_uuid]["kles"].append(kle)
        return org_unit_map

    org_unit_map = {}
    org_unit_queue = set()

    def queue_org_unit(uuid=None):
        if uuid is None:
            return
        org_unit_queue.add(uuid)

    def fetch_parent_org_units():
        # We trust that hierarchies are somewhat shallow, and thus a query per layer is okay.
        while org_unit_queue:
            query_queue = list(org_unit_queue)
            org_unit_queue.clear()
            queryset = session.query(Enhed).filter(
                Enhed.uuid.in_(query_queue)).all()
            for enhed in queryset:
                add_org_unit(enhed)

    def add_org_unit(enhed):
        # If it has already been added, do not re-add it
        if enhed.uuid in org_unit_map:
            return

        unit = {
            "uuid": enhed.uuid,
            "name": enhed.navn,
            "parent": enhed.forældreenhed_uuid,
            "engagements": [],
            "associations": [],
            "management": [],
            "kles": [],
            "addresses": {
                "DAR": [],
                "PHONE": [],
                "EMAIL": [],
                "EAN": [],
                "PNUMBER": [],
                "WWW": [],
            },
        }
        org_unit_map[enhed.uuid] = unit

        # Add parent to queue for bulk fetching later (if any)
        queue_org_unit(enhed.forældreenhed_uuid)

    def fetch_employees():
        def employee_to_dict(employee):
            return {
                "uuid": employee.uuid,
                "surname": employee.efternavn,
                "givenname": employee.fornavn,
                "name": employee.fornavn + " " + employee.efternavn,
                "engagements": [],
                "associations": [],
                "management": [],
                "addresses": {
                    "DAR": [],
                    "PHONE": [],
                    "EMAIL": [],
                    "EAN": [],
                    "PNUMBER": [],
                    "WWW": []
                }
            }

        def create_uuid_tuple(entry):
            return entry["uuid"], entry

        employees = map(employee_to_dict, session.query(Bruger).all())
        employee_map = dict(map(create_uuid_tuple, employees))
        return employee_map

    def enrich_employees_with_engagements(employee_map):
        def gen_engagement(engagement, enhed):
            return {
                "title": engagement.stillingsbetegnelse_titel,
                "name": enhed.navn,
                "uuid": enhed.uuid,
            }

        engagements = session.query(
            Engagement,
            Enhed).filter(Engagement.enhed_uuid == Enhed.uuid).all()
        return enrich_employees_with_x(employee_map, "engagements",
                                       gen_engagement, engagements)

    def enrich_employees_with_associations(employee_map):
        def gen_association(tilknytning, enhed):
            return {
                "title": tilknytning.tilknytningstype_titel,
                "name": enhed.navn,
                "uuid": enhed.uuid,
            }

        associations = session.query(
            Tilknytning,
            Enhed).filter(Tilknytning.enhed_uuid == Enhed.uuid).all()
        return enrich_employees_with_x(employee_map, "associations",
                                       gen_association, associations)

    def enrich_employees_with_management(employee_map):
        def gen_management(leder, enhed):
            return {
                "title": leder.ledertype_titel,
                "name": enhed.navn,
                "uuid": enhed.uuid,
            }

        managements = session.query(
            Leder, Enhed).filter(Leder.enhed_uuid == Enhed.uuid).filter(
                # Filter vacant leders
                Leder.bruger_uuid != None).all()
        return enrich_employees_with_x(employee_map, "management",
                                       gen_management, managements)

    def filter_employees(employee_map):
        def filter_function(phonebook_entry):
            # Do NOT import employees without an engagement or association
            # https://redmine.magenta-aps.dk/issues/34812

            # We do however want to import employees with management roles.
            # As an external employee may be a manager for an organisation unit.
            if (not phonebook_entry["associations"]
                    and not phonebook_entry["engagements"]
                    and not phonebook_entry["management"]):
                logger.info(
                    "OS2MO_IMPORT_ROUTINE Skip employee due to missing engagements, associations, management"
                )

                # Reference to the skipped employee to debug log
                logger.debug(
                    f"OS2MO_IMPORT_ROUTINE - NO_RELATIONS_TO_ORG_UNIT employee={phonebook_entry['uuid']}"
                )
                return False
            return True

        filtered_map = {
            uuid: entry
            for uuid, entry in employee_map.items() if filter_function(entry)
        }
        return filtered_map

    def enrich_org_units_with_addresses(org_unit_map):
        # Enrich with adresses
        queryset = session.query(Adresse).filter(Adresse.enhed_uuid != None)

        return address_helper(queryset, org_unit_map,
                              lambda address: address.enhed_uuid)

    def enrich_employees_with_addresses(employee_map):
        # Enrich with adresses
        queryset = session.query(Adresse).filter(Adresse.bruger_uuid != None)

        return address_helper(queryset, employee_map,
                              lambda address: address.bruger_uuid)

    def address_helper(queryset, entry_map, address_to_uuid):
        da_address_types = {
            "DAR": "DAR",
            "Telefon": "PHONE",
            "E-mail": "EMAIL",
            "EAN": "EAN",
            "P-nummer": "PNUMBER",
            "Url": "WWW",
        }

        dawa_queue = {}

        def process_address(address):
            entry_uuid = address_to_uuid(address)
            if entry_uuid not in entry_map:
                return

            atype = da_address_types[address.adressetype_scope]

            if address.værdi:
                value = address.værdi
            elif address.dar_uuid is not None:
                dawa_queue[address.dar_uuid] = dawa_queue.get(
                    address.dar_uuid, [])
                dawa_queue[address.dar_uuid].append(address)
                return
            else:
                logger.warning(f"Address: {address.uuid} does not have a value")
                return

            formatted_address = {
                "description": address.adressetype_titel,
                "value": value,
            }

            entry_map[entry_uuid]["addresses"][atype].append(formatted_address)

        queryset = queryset.filter(
            # Only include address types we care about
            Adresse.adressetype_scope.in_(da_address_types.keys())).filter(
                # Do not include secret addresses
                or_(Adresse.synlighed_titel == None,
                    Adresse.synlighed_titel != "Hemmelig"))
        for address in queryset.all():
            process_address(address)

        uuids = set(dawa_queue.keys())
        queryset = session.query(DARAdresse).filter(DARAdresse.uuid.in_(uuids))
        # iterate the DAR rows themselves so each betegnelse stays paired
        # with its uuid; mapping the values out first loses the key needed
        # to index dawa_queue
        for dar_address in queryset.all():
            value = dar_address.betegnelse
            if value is None:
                continue
            for address in dawa_queue[dar_address.uuid]:
                entry_uuid = address_to_uuid(address)
                atype = da_address_types[address.adressetype_scope]

                formatted_address = {
                    "description": address.adressetype_titel,
                    "value": value,
                }

                entry_map[entry_uuid]["addresses"][atype].append(
                    formatted_address)

        found = set(map(attrgetter('uuid'), queryset.all()))
        missing = uuids - found
        if missing:
            print(missing, "not found in DAWA")

        return entry_map

    # Employees
    # ----------
    employee_map = None
    with elapsedtime("fetch_employees"):
        employee_map = fetch_employees()
    # NOTE: These 3 queries can run in parallel
    with elapsedtime("enrich_employees_with_engagements"):
        employee_map = enrich_employees_with_engagements(employee_map)
    with elapsedtime("enrich_employees_with_associations"):
        employee_map = enrich_employees_with_associations(employee_map)
    with elapsedtime("enrich_employees_with_management"):
        employee_map = enrich_employees_with_management(employee_map)
    # Filter out employees without engagements, associations and management
    with elapsedtime("filter_employees"):
        employee_map = filter_employees(employee_map)
    with elapsedtime("enrich_employees_with_addresses"):
        employee_map = enrich_employees_with_addresses(employee_map)

    # Org Units
    # ----------
    with elapsedtime("fetch_parent_org_units"):
        fetch_parent_org_units()
    # NOTE: These 3 queries can run in parallel
    with elapsedtime("enrich_org_units_with_engagements"):
        org_unit_map = enrich_org_units_with_engagements(org_unit_map)
    with elapsedtime("enrich_org_units_with_associations"):
        org_unit_map = enrich_org_units_with_associations(org_unit_map)
    with elapsedtime("enrich_org_units_with_management"):
        org_unit_map = enrich_org_units_with_management(org_unit_map)
    with elapsedtime("enrich_org_units_with_kles"):
        org_unit_map = enrich_org_units_with_kles(org_unit_map)
    with elapsedtime("enrich_org_units_with_addresses"):
        org_unit_map = enrich_org_units_with_addresses(org_unit_map)

    print("Processing took", query_counter.count, "queries")

    # Write files
    # ------------
    # TODO: Asyncio to write both files at once?
    with open("tmp/employees.json", "w") as employees_out:
        json.dump(employee_map, employees_out)

    with open("tmp/org_units.json", "w") as org_units_out:
        json.dump(org_unit_map, org_units_out)
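    # A sketch for the TODO above (not in the original): json.dump blocks on
    # file I/O, so the two dumps could run in worker threads once both maps
    # are fully built. Plain threads suffice; an asyncio version would need
    # an async file library such as aiofiles.
    def _write_files_concurrently():
        from concurrent.futures import ThreadPoolExecutor

        def write_json(path, data):
            with open(path, "w") as out:
                json.dump(data, out)

        with ThreadPoolExecutor(max_workers=2) as pool:
            jobs = [
                pool.submit(write_json, "tmp/employees.json", employee_map),
                pool.submit(write_json, "tmp/org_units.json", org_unit_map),
            ]
            for job in jobs:
                job.result()  # propagate any write errors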
Example #50
from sqlalchemy import MetaData, DDL, event
from sqlalchemy.types import ARRAY
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy.pool import QueuePool
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.engine.url import URL

import os
import yaml


Base = declarative_base(metadata=MetaData(schema='results'))
event.listen(
    Base.metadata,
    'before_create',
    DDL("CREATE SCHEMA IF NOT EXISTS results")
)

group_proc_filename = os.path.join(
    os.path.dirname(__file__),
    'model_group_stored_procedure.sql'
)
with open(group_proc_filename) as f:
    stmt = f.read()

event.listen(
    Base.metadata,
    'before_create',
    DDL(stmt)
)
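
# Usage sketch (assumed, not part of the original module): both listeners
# fire when the tables are created, so a single create_all() first ensures
# the schema exists and then installs the stored procedure:
#
#   engine = create_engine(URL(**yaml.safe_load(open('db.yaml'))))
#   Base.metadata.create_all(engine)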
Example #51
File: net.py  Project: agdsn/pycroft
class VLAN(IntegerIdModel):
    name = Column(String(127), nullable=False)
    vid = Column(Integer, nullable=False)

    __table_args__ = (CheckConstraint(between(vid, 1, 4094)), )
    switch_ports = relationship('SwitchPort',
                                secondary='switch_port_default_vlans',
                                back_populates='default_vlans')
    subnets = relationship('Subnet', back_populates='vlan', viewonly=True)


class Subnet(IntegerIdModel):
    address = Column(IPNetwork, nullable=False)
    gateway = Column(IPAddress)
    reserved_addresses_bottom = Column(Integer,
                                       server_default=sql.text('0'),
                                       nullable=False)
    reserved_addresses_top = Column(Integer,
                                    server_default=sql.text('0'),
                                    nullable=False)
    description = Column(String(50))

    vlan_id = Column(Integer, ForeignKey(VLAN.id), nullable=False, index=True)
    vlan = relationship(VLAN, back_populates="subnets")


# Ensure that the gateway is contained in the subnet
constraint = CheckConstraint(Subnet.gateway.op('<<')(Subnet.address))
event.listen(Subnet.__table__, "after_create",
             AddConstraint(constraint).execute_if(dialect='postgresql'))
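
# Note (sketch): '<<' is PostgreSQL's inet/cidr containment operator, so the
# constraint above enforces that the gateway lies inside the subnet. The
# execute_if(dialect='postgresql') guard skips the ALTER TABLE on dialects
# that lack the operator.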
Example #52
        listen(Base.metadata, 'after_create',
               ddl.execute_if(dialect='postgresql'))


def grant_db_access(_, conn, *args, **kwargs):
    user = get_config('unpriv_db_username', None)
    if user:
        conn.execute("""
                     GRANT SELECT, INSERT, UPDATE, DELETE
                     ON ALL TABLES IN SCHEMA PUBLIC TO {user};
                     GRANT SELECT, USAGE ON ALL SEQUENCES
                     IN SCHEMA PUBLIC TO {user};
                     """.format(user=user))


listen(Table, 'after_create', grant_db_access)
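
# Note (sketch): listening on the Table class itself applies the grant to
# every table created in this process; grant_db_access matches the
# after_create signature (target, connection, **kw), discarding the Table
# as `_`.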


def create_all():
    conn = get_engine().connect()
    try:
        conn.execute("SHOW bdr.permit_ddl_locking")
        bdr = True
    except Exception:
        bdr = False
    tx = conn.begin()
    try:
        if bdr:
            conn.execute("SET LOCAL bdr.permit_ddl_locking = true")
        load_ddl()
        Base.metadata.create_all(
Example #53
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """
    settings = getSettings(global_config['__file__'], settings)
    log = logging.getLogger(__name__)

    engine = engine_from_config(settings, 'sqlalchemy.')
    event.listen(engine, 'checkout', checkout_listener)
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine

    config = Configurator(settings=settings, root_factory=RootFactory)
    config.include('pyramid_chameleon')
    config.include('pyramid_ldap')
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_route('home', '/')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('applications', '/applications')
    config.add_route('deploys', '/deploys')
    config.add_route('promote', '/promote')
    config.add_route('help', '/help')
    config.add_route('user', '/user')
    config.add_route('ss', '/ss')
    config.add_route('cp', '/cp')
    config.add_route('cp_application', '/cp/application')
    config.add_route('cp_user', '/cp/user')
    config.add_route('cp_group', '/cp/group')
    config.add_route('api', '/api/{resource}')
    config.add_route('healthcheck', '/healthcheck')
    config.add_route('test', '/test')
    config.add_renderer('json', JSON(indent=2))

    if settings['tcw.auth_mode'] == 'ldap':
        log.info('Configuring ldap users and groups')

        config.set_authentication_policy(
            AuthTktAuthenticationPolicy(settings['tcw.cookie_token'],
                                        callback=groupfinder,
                                        max_age=604800)
            )

        # Load the cert if it's defined and exists
        if os.path.isfile(settings['tcw.ldap_cert']):
            ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings['tcw.ldap_cert'])
    
        config.ldap_setup(
            settings['tcw.ldap_server'] + ':' + settings['tcw.ldap_port'],
            bind=settings['tcw.ldap_bind'],
            passwd=settings['tcw.ldap_password'],
            )

        config.ldap_set_login_query(
            base_dn=settings['tcw.login_base_dn'],
            filter_tmpl=settings['tcw.login_filter'],
            scope=ldap.SCOPE_SUBTREE,
            cache_period=600,
            )

        config.ldap_set_groups_query(
            base_dn=settings['tcw.group_base_dn'],
            filter_tmpl=settings['tcw.group_filter'],
            scope=ldap.SCOPE_SUBTREE,
            cache_period=600,
            )
    else:
        log.info('Configuring local users and groups.')
        config.set_authentication_policy(
            AuthTktAuthenticationPolicy(settings['tcw.cookie_token'],
                                        callback=local_groupfinder,
                                        max_age=604800)
            )

    config.set_authorization_policy(
        ACLAuthorizationPolicy()
        )

    # Load our groups and perms from the db and load them into the ACL
    try:
        for group in DBSession.query(Group).all():
            assignments = group.get_all_assignments()
            if assignments:
                RootFactory.__acl__.append(
                    [Allow, group.group_name, tuple(assignments)])

    except Exception:
        raise
Example #54
        """
        build up fulltext index after table is created
        """
        if FullText not in cls.__bases__:
            return
        assert cls.__fulltext_columns__, "Model:{0.__name__} No FullText columns defined".format(
            cls)

        event.listen(
            cls.__table__, 'after_create',
            DDL(
                MYSQL_BUILD_INDEX_QUERY.format(
                    cls, ", ".join(
                        (escape_quote(c) for c in cls.__fulltext_columns__)))))

    """
    TODO: black magic in the future
    @classmethod
    @declared_attr
    def __contains__(*arg):
        return True
    """


def __build_fulltext_index(mapper, class_):
    if issubclass(class_, FullText):
        class_.build_fulltext()


event.listen(Mapper, 'instrument_class', __build_fulltext_index)
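
# Usage sketch (assumed; FullText, Base and the column types come from the
# surrounding module): a mapped class opts in by inheriting FullText and
# naming the columns to index; the 'instrument_class' hook above then
# registers the MySQL FULLTEXT DDL for it.
#
# class Post(Base, FullText):
#     __tablename__ = 'post'
#     __fulltext_columns__ = ('title', 'body')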
Example #55
 def start_logging(self):
     event.listen(self.engine,
                  "after_cursor_execute",
                  self._after_cursor_execute_event_handler,
                  named=True)
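
 # A plausible shape for the handler above (a sketch; the real
 # _after_cursor_execute_event_handler is defined elsewhere in this class).
 # With named=True the event delivers its arguments as keywords:
 #
 # def _after_cursor_execute_event_handler(self, **kw):
 #     # kw holds: conn, cursor, statement, parameters, context, executemany
 #     log.debug("SQL: %s", kw["statement"])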
Example #56
def _mappers_configured():
    for model in get_all_models():
        if hasattr(model, '__table__') and model.allow_relationship_preloading:
            listen(model, 'load', model._populate_preloaded_relationships)
Example #57
    pagestate_id = Column(Integer,
                          ForeignKey('pagestate.id', name='fk_page_pagestate'))
    pagestate = relationship('PageState', backref='Page', cascade='all,delete')

    pageviews = Column(Integer, default=0)

    def __repr__(self):
        return self.title


def page_before_insert_update_listener(mapper, connection, target):
    if target.url is None:
        target.url = get_safe_url(target.title)


event.listen(Page, 'before_insert', page_before_insert_update_listener)
event.listen(Page, 'before_update', page_before_insert_update_listener)
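
# Usage sketch (assumed): with the two listeners above, saving a Page
# without an explicit url derives one from the title, on both INSERT and
# UPDATE:
#
# page = Page(title='About Us')
# db.session.add(page)
# db.session.commit()   # page.url == get_safe_url('About Us')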


class Menu(db.Model):
    __tablename__ = 'menu'

    id = Column(Integer, primary_key=True)
    title = Column(String, nullable=False)
    order = Column(Integer, nullable=False)

    page_id = Column(Integer, ForeignKey('page.id'))
    page = relationship("Page",
                        backref=backref("Linked from Menu", uselist=False))

    def __repr__(self):
Example #58
                   }})

    def __init__(self,
                 name: str = '',
                 value: str = '',
                 description: str = '') -> None:
        self.name = name
        self.value = value
        self.description = description

    def __unicode__(self) -> str:
        return '{0!s} - {1!s}'.format(self.name or '', self.value
                                      or '')  # pragma: no cover


event.listen(Functionality, 'after_update', cache_invalidate_cb)
event.listen(Functionality, 'after_delete', cache_invalidate_cb)

# association table role <> functionality
role_functionality = Table('role_functionality',
                           Base.metadata,
                           Column('role_id',
                                  Integer,
                                  ForeignKey(_schema + '.role.id'),
                                  primary_key=True),
                           Column('functionality_id',
                                  Integer,
                                  ForeignKey(_schema + '.functionality.id'),
                                  primary_key=True),
                           schema=_schema)
Example #59
                                  echo=False)  # sqlalchemy sqls...

session_maker = sqlalchemy.orm.sessionmaker()
session_maker.configure(bind=engine)
session = session_maker()

by_rules = True  # True => use rules, False => use hand code (for comparison)
rule_list = None
db = None
if by_rules:
    rule_bank_setup.setup(session, engine)
    activate_basic_check_credit_rules()
    rule_bank_setup.validate(session, engine)  # checks for cycles, etc
else:
    # event.listen args: target, event identifier, listener function
    event.listen(session, "before_commit", nw_before_commit)
    event.listen(session, "before_flush", nw_before_flush)

print("\n" + prt("session created, listeners registered\n"))
'''  *** Exploring alternate listener strategies - ignore ***
@event.listens_for(models.Order.ShippedDate, 'modified')
def receive_modified(target, initiator):
    print('Order Modified (Decorator - __init__')
'''
'''
@event.listens_for(Order, 'before_update')
def before_update(mapper, connection, target):
    state = db.inspect(target)
    changes = {}

    for attr in state.attrs:
Example #60
    @property
    def is_authenticated(self):
        return True

    def get_id(self):
        try:
            return self.id
        except AttributeError:
            raise NotImplementedError('No `id` attribute - override `get_id`')

    def __repr__(self):
        return '<User %r>' % (self.username)


def hash_password(target, value, oldvalue, initiator):
    if value is not None:
        return generate_password_hash(value)


# Set up a listener on the User.password attribute
event.listen(User.password, 'set', hash_password, retval=True)


@login_manager.user_loader
def load_user(id):
    """
    For flask-login get user id
    """
    return User.query.get(int(id))
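
# Usage sketch (assumed): because the 'set' listener is registered with
# retval=True, its return value replaces the assigned value, so plaintext
# never reaches the column:
#
# user = User(username='alice')
# user.password = 's3cret'   # stored as generate_password_hash('s3cret')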