def autoinc_handler(sa_table, sa_table_name, id_column_name, id_seq_name, insert_dict):
    """Attach DDL listeners so the id column autoincrements consistently.

    For PostgreSQL/MySQL the column default is bound to the given sequence,
    so inserts issued outside SQLAlchemy also use it.  SQLite has no
    sequences, so a seed row is inserted right after table creation instead.

    :param sa_table: SQLAlchemy Table object to attach listeners to.
    :param sa_table_name: table name interpolated into the raw DDL.
    :param id_column_name: name of the autoincrement column.
    :param id_seq_name: sequence name (sequence-capable backends only).
    :param insert_dict: column -> value mapping for the SQLite seed row;
        falsy values are replaced with a random UUID string literal.
    """
    # Specify that all insert operations (whether through SQLAlchemy or
    # externally launched) must use the sequence for DBs that support
    # sequences
    sa.event.listen(
        sa_table, 'after_create', sa.DDL(
            "ALTER TABLE %s ALTER COLUMN %s SET DEFAULT nextval('%s');" %
            (sa_table_name, id_column_name, id_seq_name)).execute_if(
            dialect=('postgresql', 'mysql')))
    # SQLite does not support sequences - insert a dummy row with id=-1 so
    # that the next insert operation will use id=0
    keys_list = []
    values_list = []
    # BUG FIX: dict.iteritems() does not exist in Python 3; items() works
    # on both Python 2 and 3.
    for key, val in insert_dict.items():
        keys_list.append(key)
        # if None (or otherwise falsy), generate a random string literal
        values_list.append(val if val else ("'%s'" % (str(uuid.uuid4()))))
    # NOTE(review): values are interpolated directly into the SQL text (DDL
    # cannot be parameterized); callers must only pass trusted data here.
    sa.event.listen(
        sa_table,
        'after_create',
        sa.DDL(("INSERT INTO %s ( " +
                ",".join([str(x) for x in keys_list]) +
                " ) VALUES ( " +
                ",".join([str(x) for x in values_list]) +
                " );") %
               (sa_table_name)).execute_if(dialect='sqlite'))
Esempio n. 2
0
    def define_tables(cls, metadata):
        """Create a local test table and wire postgres_fdw server /
        user-mapping / foreign-table DDL to the metadata lifecycle events.
        """
        from sqlalchemy.testing import config
        # Remote host for the FDW server comes from the [sqla_testing]
        # section of the test config file.
        dblink = config.file_config.get('sqla_testing',
                                        'postgres_test_db_link')

        testtable = Table('testtable', metadata,
                          Column('id', Integer, primary_key=True),
                          Column('data', String(30)))

        # After metadata create: set up the FDW server, a public user
        # mapping, and a foreign table mirroring 'testtable'.
        for ddl in [
                "CREATE SERVER test_server FOREIGN DATA WRAPPER postgres_fdw "
                "OPTIONS (dbname 'test', host '%s')" % dblink,
                "CREATE USER MAPPING FOR public \
            SERVER test_server options (user 'scott', password 'tiger')",
                "CREATE FOREIGN TABLE test_foreigntable ( "
                "   id          INT, "
                "   data        VARCHAR(30) "
                ") SERVER test_server OPTIONS (table_name 'testtable')",
        ]:
            sa.event.listen(metadata, "after_create", sa.DDL(ddl))

        # Before metadata drop: tear down in reverse dependency order
        # (foreign table -> user mapping -> server).
        for ddl in [
                'DROP FOREIGN TABLE test_foreigntable',
                'DROP USER MAPPING FOR public SERVER test_server',
                "DROP SERVER test_server"
        ]:
            sa.event.listen(metadata, "before_drop", sa.DDL(ddl))
Esempio n. 3
0
def downgrade():
    """Restore username/name columns on user/organization from the "name"
    table, then drop the "name" table and its indexes."""
    # Re-add the plain columns and their unique constraints on both tables.
    for table, column, constraint in (
            (u'user', 'username', u'user_username_key'),
            (u'organization', 'name', u'organization_name_key')):
        op.add_column(
            table,
            sa.Column(column,
                      sa.VARCHAR(length=80),
                      autoincrement=False,
                      nullable=True))
        op.create_unique_constraint(constraint, table, [column])

    # Swap the functional index back to the user table.
    op.drop_index(op.f('ix_name_name_lower'), table_name='name')
    op.execute(
        sa.DDL(
            '''CREATE INDEX ix_user_username_lower ON "user" (lower(username) varchar_pattern_ops);'''
        ))

    # Copy names back out of the "name" table into the restored columns.
    op.execute(
        sa.DDL('''
        UPDATE "user" SET (username) = (SELECT name FROM "name" WHERE "name".user_id = "user".id)
        '''))
    op.execute(
        sa.DDL('''
        UPDATE "organization" SET (name) = (SELECT name FROM "name" WHERE "name".org_id = "organization".id)
        '''))

    # Finally remove the "name" table itself.
    op.drop_index(op.f('ix_name_reserved'), table_name='name')
    op.drop_table('name')
Esempio n. 4
0
def downgrade():
    """Swap the update_* search-vector trigger back to the post_* pair and
    undo the constraint renames / NOT NULL change on the update table."""
    # Remove the trigger/function installed by the matching upgrade().
    op.execute(
        sa.DDL(
            dedent('''
        DROP TRIGGER update_search_vector_trigger ON update;
        DROP FUNCTION update_search_vector_update();
        ''')))

    # Reinstall the previous full-text trigger: name/title weighted 'A',
    # body_text weighted 'B'.
    op.execute(
        sa.DDL(
            dedent('''
        CREATE FUNCTION post_search_vector_update() RETURNS trigger AS $$
        BEGIN
            NEW.search_vector := setweight(to_tsvector('english', COALESCE(NEW.name, '')), 'A') || setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') || setweight(to_tsvector('english', COALESCE(NEW.body_text, '')), 'B');
            RETURN NEW;
        END
        $$ LANGUAGE plpgsql;

        CREATE TRIGGER post_search_vector_trigger BEFORE INSERT OR UPDATE ON update
        FOR EACH ROW EXECUTE PROCEDURE post_search_vector_update();
                ''')))

    # Revert each constraint to its pre-rename name (module-level mapping).
    for old, new in renamed_constraints:
        op.execute(
            sa.DDL(f'ALTER TABLE update RENAME CONSTRAINT "{new}" TO "{old}"'))
    # project_id becomes nullable again.
    op.alter_column('update',
                    'project_id',
                    existing_type=sa.INTEGER(),
                    nullable=True)
Esempio n. 5
0
def downgrade():
    """Fold the user_user{phone,email}_primary link tables back into a
    boolean "primary" column on userphone/useremail."""
    # 1. Add the boolean columns as NOT NULL, seeded false through a
    # temporary server default that is dropped right after.
    for table in ('userphone', 'useremail'):
        op.add_column(table, sa.Column(
            'primary', sa.BOOLEAN(), autoincrement=False, nullable=False,
            server_default=expression.false()))
    for table in ('userphone', 'useremail'):
        op.alter_column(table, 'primary', server_default=None)

    # 2. Flag the rows referenced by the primary link tables.
    op.execute(sa.DDL('''
        UPDATE useremail SET "primary" = true
        FROM user_useremail_primary WHERE useremail.id = user_useremail_primary.useremail_id;
        '''))
    op.execute(sa.DDL('''
        UPDATE userphone SET "primary" = true
        FROM user_userphone_primary WHERE userphone.id = user_userphone_primary.userphone_id;
        '''))

    # 3. Drop the validation triggers/functions, then the link tables.
    op.execute(sa.DDL('''
        DROP TRIGGER user_userphone_primary_trigger ON user_userphone_primary;
        DROP FUNCTION user_userphone_primary_validate();
        '''))
    op.execute(sa.DDL('''
        DROP TRIGGER user_useremail_primary_trigger ON user_useremail_primary;
        DROP FUNCTION user_useremail_primary_validate();
        '''))
    op.drop_table('user_userphone_primary')
    op.drop_table('user_useremail_primary')
Esempio n. 6
0
def downgrade():
    """Revert comp_pipeline/comp_tasks state columns from the statetype enum
    back to their legacy string/integer columns and rebuild the trigger."""
    # rename columns so the enum values can be copied before dropping them
    op.alter_column("comp_pipeline",
                    "state",
                    new_column_name="deprecated_state")
    op.alter_column("comp_tasks", "state", new_column_name="deprecated_state")
    # create the old columns again
    op.add_column("comp_pipeline",
                  sa.Column("state", sa.String(), nullable=True))
    op.add_column("comp_tasks", sa.Column("state", sa.Integer(),
                                          nullable=True))
    # migrate the columns: map each enum label to its legacy value
    # (module-level constants UNKNOWN/PENDING/RUNNING/SUCCESS/FAILED)
    migration_map = {
        "NOT_STARTED": UNKNOWN,
        "PUBLISHED": UNKNOWN,
        "PENDING": PENDING,
        "RUNNING": RUNNING,
        "SUCCESS": SUCCESS,
        "FAILED": FAILED,
    }
    # IDIOM FIX: the originals used list comprehensions purely for their
    # side effects; a plain for loop expresses intent without building a
    # throwaway list.
    for old, new in migration_map.items():
        op.execute(
            sa.DDL(f"""
UPDATE comp_pipeline
SET state='{new}'
WHERE comp_pipeline.deprecated_state = '{old}'
"""))
    for old, new in migration_map.items():
        op.execute(
            sa.DDL(f"""
UPDATE comp_tasks
SET state='{new}'
WHERE comp_tasks.deprecated_state = '{old}'
"""))
    # replace trigger to remove dependency on old state
    replace_trigger_query = sa.DDL(f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
CREATE TRIGGER {DB_TRIGGER_NAME}
AFTER UPDATE OF outputs ON comp_tasks
    FOR EACH ROW
    WHEN (OLD.outputs::jsonb IS DISTINCT FROM NEW.outputs::jsonb AND NEW.node_class <> 'FRONTEND')
    EXECUTE PROCEDURE {DB_PROCEDURE_NAME}();
""")
    op.execute(replace_trigger_query)
    # drop the now-migrated enum columns
    op.drop_column("comp_tasks", "deprecated_state")
    op.drop_column("comp_pipeline", "deprecated_state")

    # finally drop the enum type itself (no column references it anymore)
    state_type = postgresql.ENUM(
        "NOT_STARTED",
        "PUBLISHED",
        "PENDING",
        "RUNNING",
        "SUCCESS",
        "FAILED",
        name="statetype",
    )
    state_type.drop(op.get_bind())
Esempio n. 7
0
    def define_tables(cls, metadata):
        """Define testtable plus a regular and a materialized view over it,
        binding view create/drop DDL to the table's lifecycle events."""
        testtable = Table(
            'testtable', metadata,
            Column('id', Integer, primary_key=True),
            Column('data', String(30)))

        # Seed one row immediately after table creation, so the views (and
        # especially the materialized view's snapshot) see data.
        @sa.event.listens_for(testtable, "after_create")
        def insert_data(target, connection, **kw):
            connection.execute(
                target.insert(),
                {"id": 89, "data": 'd1'}
            )

        # Create both views after the table exists.
        sa.event.listen(
            testtable, 'after_create',
            sa.DDL("CREATE VIEW test_regview AS "
                   "SELECT * FROM testtable"))
        sa.event.listen(
            testtable, 'after_create',
            sa.DDL("CREATE MATERIALIZED VIEW test_mview AS "
                   "SELECT * FROM testtable"))
        # Drop both views before the table goes away.
        sa.event.listen(
            testtable, 'before_drop',
            sa.DDL("DROP MATERIALIZED VIEW test_mview"))
        sa.event.listen(
            testtable, 'before_drop',
            sa.DDL("DROP VIEW test_regview"))
def downgrade():
    """Strip the timezone from created_at and restore the updated_at column
    on team_membership."""
    strip_tz = sa.DDL('ALTER TABLE team_membership ALTER COLUMN created_at '
                      'TYPE TIMESTAMP WITHOUT TIME ZONE')
    op.execute(strip_tz)

    # Recreate updated_at as nullable, backfill it from created_at, then
    # tighten it to NOT NULL.
    op.add_column('team_membership',
                  sa.Column('updated_at', sa.DateTime(), nullable=True))
    op.execute(sa.DDL('UPDATE team_membership SET updated_at = created_at'))
    op.alter_column('team_membership', 'updated_at', nullable=False)
def downgrade():
    """Drop and recreate the user_user{email,phone}_primary validation
    triggers/functions (restoring the earlier trigger definitions)."""
    # Remove the current trigger/function pairs first; CREATE below would
    # otherwise collide with the existing names.
    op.execute(
        sa.DDL(
            '''
        DROP TRIGGER user_useremail_primary_trigger ON user_useremail_primary;
        DROP FUNCTION user_useremail_primary_validate();
        '''
        )
    )
    op.execute(
        sa.DDL(
            '''
        DROP TRIGGER user_userphone_primary_trigger ON user_userphone_primary;
        DROP FUNCTION user_userphone_primary_validate();
        '''
        )
    )
    # Reinstall the email validator: rejects link rows whose useremail does
    # not belong to the same user.
    op.execute(
        sa.DDL(
            '''
        CREATE FUNCTION user_useremail_primary_validate() RETURNS TRIGGER AS $$
        DECLARE
            target RECORD;
        BEGIN
            SELECT user_id INTO target FROM useremail WHERE id = NEW.useremail_id;
            IF (target.user_id != NEW.user_id) THEN
                RAISE foreign_key_violation USING MESSAGE = 'The target is not affiliated with this parent';
            END IF;
            RETURN NEW;
        END;
        $$ LANGUAGE plpgsql;
        CREATE TRIGGER user_useremail_primary_trigger BEFORE INSERT OR UPDATE
        ON user_useremail_primary
        FOR EACH ROW EXECUTE PROCEDURE user_useremail_primary_validate();
        '''
        )
    )
    # Same validator for phone link rows.
    op.execute(
        sa.DDL(
            '''
        CREATE FUNCTION user_userphone_primary_validate() RETURNS TRIGGER AS $$
        DECLARE
            target RECORD;
        BEGIN
            SELECT user_id INTO target FROM userphone WHERE id = NEW.userphone_id;
            IF (target.user_id != NEW.user_id) THEN
                RAISE foreign_key_violation USING MESSAGE = 'The target is not affiliated with this parent';
            END IF;
            RETURN NEW;
        END;
        $$ LANGUAGE plpgsql;
        CREATE TRIGGER user_userphone_primary_trigger BEFORE INSERT OR UPDATE
        ON user_userphone_primary
        FOR EACH ROW EXECUTE PROCEDURE user_userphone_primary_validate();
        '''
        )
    )
Esempio n. 10
0
def reset_schema(engine, schema_name):
    """Execute drop and declaration for schema space.

    Both statements run inside a single transaction.
    """
    schema = pg.Schema(schema_name)
    with engine.begin() as conn:
        conn.execute(sa.DDL(schema.drop_statement()))
        conn.execute(sa.DDL(schema.create_statement()))
Esempio n. 11
0
    def test_inspect_enums_case_sensitive_from_table(self):
        """Columns using ENUM types with case-sensitive / dotted names are
        reflected with the correct schema, name and labels."""
        # The quoted schema name exercises case-sensitive identifiers.
        sa.event.listen(
            self.metadata,
            "before_create",
            sa.DDL('create schema "TestSchema"'),
        )
        sa.event.listen(
            self.metadata,
            "after_drop",
            sa.DDL('drop schema "TestSchema" cascade'),
        )

        # One table per (enum name, schema) combination: t0, t1, ...
        counter = itertools.count()
        for enum in "lower_case", "UpperCase", "Name.With.Dot":
            for schema in None, "test_schema", "TestSchema":

                enum_type = postgresql.ENUM(
                    "CapsOne",
                    "CapsTwo",
                    name=enum,
                    metadata=self.metadata,
                    schema=schema,
                )

                Table(
                    "t%d" % next(counter),
                    self.metadata,
                    Column("q", enum_type),
                )

        self.metadata.create_all(testing.db)

        inspector = inspect(testing.db)
        # Walk the tables in the same creation order and compare each
        # reflected column against the expected (schema, name, labels).
        counter = itertools.count()
        for enum in "lower_case", "UpperCase", "Name.With.Dot":
            for schema in None, "test_schema", "TestSchema":
                cols = inspector.get_columns("t%d" % next(counter))
                # Replace the reflected type object with a comparable tuple
                # so eq_ can compare the whole column dict.
                cols[0]["type"] = (
                    cols[0]["type"].schema,
                    cols[0]["type"].name,
                    cols[0]["type"].enums,
                )
                eq_(
                    cols,
                    [
                        {
                            "name": "q",
                            "type": (schema, enum, ["CapsOne", "CapsTwo"]),
                            "nullable": True,
                            "default": None,
                            "autoincrement": False,
                            "comment": None,
                        }
                    ],
                )
Esempio n. 12
0
def rebuild_views():
    """Drop and recreate the reporting views from their module-level DDL."""
    # This import is kinda kludgy (and would be circular outside of this
    # function) but our model engine is tied up with the Flask framework
    # (for now)
    from bafs import db
    sess = db.session  # @UndefinedVariable
    sess.execute(_v_daily_scores_create)
    # Each view is dropped (if present) immediately before being recreated.
    for drop_sql, create_stmt in (
            ("drop view if exists _build_ride_daylight;", _v_buid_ride_daylight),
            ("drop view if exists ride_daylight;", _v_ride_daylight),
            ("drop view if exists lbd_athletes;", _v_leaderboard_athletes),
    ):
        sess.execute(sa.DDL(drop_sql))
        sess.execute(create_stmt)
Esempio n. 13
0
def upgrade():
    """Recreate user_to_projects foreign keys with CASCADE semantics and
    migrate projects.access_rights to the structured permission format."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the auto-named FKs with explicitly named ones that cascade.
    op.drop_constraint(
        "user_to_projects_user_id_fkey", "user_to_projects", type_="foreignkey"
    )
    op.drop_constraint(
        "user_to_projects_project_id_fkey", "user_to_projects", type_="foreignkey"
    )
    op.create_foreign_key(
        "fk_user_to_projects_id_projects",
        "user_to_projects",
        "projects",
        ["project_id"],
        ["id"],
        onupdate="CASCADE",
        ondelete="CASCADE",
    )
    op.create_foreign_key(
        "fk_user_to_projects_id_users",
        "user_to_projects",
        "users",
        ["user_id"],
        ["id"],
        onupdate="CASCADE",
        ondelete="CASCADE",
    )

    # change contents in projects access_rights
    # NOTE: this does not need to be reversed as it was not used before
    # Rewrites the legacy "rwx" marker into an explicit permission object.
    # NOTE(review): regexp_replace is called without the 'g' flag, so only
    # the first "rwx" occurrence per row is replaced — confirm intended.
    op.execute(
        sa.DDL(
            "UPDATE projects SET access_rights = (regexp_replace(access_rights::text, '\"rwx\"', '{\"read\":true, \"write\":false, \"delete\":false}')::jsonb) WHERE access_rights != '{}'"
        )
    )
    # add prj_owner into access rights column
    # NOTE: this dows not need to be reversed
    # Grants the owner's primary group full rights inside access_rights.
    op.execute(
        sa.DDL(
    """
WITH user_project as (
 SELECT projects.id AS pid, projects.access_rights AS current_rights, '{"' || users.primary_gid || '"}' AS json_key
  FROM projects
  INNER JOIN users
  ON (projects.prj_owner = users.id)
)

UPDATE projects
 SET access_rights = jsonb_insert(current_rights::jsonb,json_key::text[], '{"read":true, "write":true, "delete":true}'::jsonb, true)
 FROM user_project
 WHERE projects.id = pid
    """
        )
    )
def upgrade():
    """Rename the post table to update, removing profile-specific artifacts
    and carrying constraint/index names over."""
    # Drop the profile linkage before the rename.
    op.drop_index('ix_post_profile_id', 'post')
    op.drop_constraint('post_owner_check', 'post', type_='check')
    op.drop_column('post', 'profile_id')

    op.rename_table('post', 'update')
    op.execute(sa.DDL('ALTER SEQUENCE post_id_seq RENAME TO update_id_seq'))

    # Constraint and index renames come from module-level mappings.
    for old_name, new_name in renamed_constraints:
        op.execute(sa.DDL(
            f'ALTER TABLE update RENAME CONSTRAINT "{old_name}" TO "{new_name}"'))

    for old_name, new_name in renamed_indexes:
        op.execute(sa.DDL(f'ALTER INDEX "{old_name}" RENAME TO "{new_name}"'))
Esempio n. 15
0
def upgrade():
    """Introduce the "name" table and migrate user.username and
    organization.name into it, dropping the original columns."""
    op.create_table(
        'name',
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('updated_at', sa.DateTime(), nullable=False),
        sa.Column('name', sa.Unicode(length=63), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('org_id', sa.Integer(), nullable=True),
        sa.Column('reserved', sa.Boolean(), nullable=False),
        sa.Column('id', UUIDType(binary=False), nullable=False),
        # Exactly one of user_id / org_id / reserved must hold per row.
        sa.CheckConstraint(
            'CASE WHEN (user_id IS NOT NULL) THEN 1 ELSE 0 END + CASE WHEN (org_id IS NOT NULL) THEN 1 ELSE 0 END + CASE WHEN (reserved = true) THEN 1 ELSE 0 END = 1',
            name='username_owner_check',
        ),
        sa.ForeignKeyConstraint(['org_id'], ['organization.id'],
                                ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
        sa.UniqueConstraint('org_id'),
        sa.UniqueConstraint('user_id'),
    )
    op.create_index(op.f('ix_name_reserved'),
                    'name', ['reserved'],
                    unique=False)

    # Copy existing usernames and organization names into "name", reusing
    # each source row's uuid as the new primary key.
    op.execute(
        sa.DDL('''
        INSERT INTO "name" (reserved, id, created_at, updated_at, name, user_id)
        SELECT False, uuid, created_at, updated_at, username, id FROM "user"
        WHERE username IS NOT null
        ORDER BY created_at;
        '''))
    op.execute(
        sa.DDL('''
        INSERT INTO "name" (reserved, id, created_at, updated_at, name, org_id)
        SELECT False, uuid, created_at, updated_at, name, id FROM "organization"
        WHERE name IS NOT null
        ORDER BY created_at;
        '''))

    # Move the functional lowercase index from user.username to name.name.
    op.drop_index(op.f('ix_user_username_lower'), table_name='user')
    op.execute(
        sa.DDL(
            '''CREATE UNIQUE INDEX ix_name_name_lower ON "name" (lower(name) varchar_pattern_ops);'''
        ))

    # Drop the now-migrated source columns and their unique constraints.
    op.drop_constraint('organization_name_key', 'organization', type_='unique')
    op.drop_column('organization', 'name')
    op.drop_constraint('user_username_key', 'user', type_='unique')
    op.drop_column('user', 'username')
Esempio n. 16
0
    def test_inspect_enums_case_sensitive(self):
        """get_enums() reports enums with case-sensitive / dotted names and
        the correct schema/visibility per target schema."""
        # Quoted schema name exercises case-sensitive identifiers.
        sa.event.listen(
            self.metadata,
            "before_create",
            sa.DDL('create schema "TestSchema"'),
        )
        sa.event.listen(
            self.metadata,
            "after_drop",
            sa.DDL('drop schema "TestSchema" cascade'),
        )

        # Register one enum per (name, schema) combination on the metadata.
        for enum in "lower_case", "UpperCase", "Name.With.Dot":
            for schema in None, "test_schema", "TestSchema":

                postgresql.ENUM(
                    "CapsOne",
                    "CapsTwo",
                    name=enum,
                    schema=schema,
                    metadata=self.metadata,
                )

        self.metadata.create_all(testing.db)
        inspector = inspect(testing.db)
        # Results are sorted by name for a deterministic comparison; enums
        # without an explicit schema land in "public" and are "visible".
        for schema in None, "test_schema", "TestSchema":
            eq_(
                sorted(
                    inspector.get_enums(schema=schema), key=itemgetter("name")
                ),
                [
                    {
                        "visible": schema is None,
                        "labels": ["CapsOne", "CapsTwo"],
                        "name": "Name.With.Dot",
                        "schema": "public" if schema is None else schema,
                    },
                    {
                        "visible": schema is None,
                        "labels": ["CapsOne", "CapsTwo"],
                        "name": "UpperCase",
                        "schema": "public" if schema is None else schema,
                    },
                    {
                        "visible": schema is None,
                        "labels": ["CapsOne", "CapsTwo"],
                        "name": "lower_case",
                        "schema": "public" if schema is None else schema,
                    },
                ],
            )
def downgrade():
    """Remove the ABORTED label from the statetype enum by rebuilding the
    type and rewiring the comp_tasks trigger.

    Postgres cannot drop a single enum label, so the sequence is:
    rename old type -> create new type -> re-point columns -> drop old type.
    """
    # set the ABORTED value to NOT_STARTED and rename the statetype
    op.execute(
        sa.DDL(
            """
UPDATE comp_tasks SET state = 'NOT_STARTED' WHERE state = 'ABORTED';
UPDATE comp_pipeline SET state = 'NOT_STARTED' WHERE state = 'ABORTED';
ALTER TYPE statetype RENAME TO statetype_old;
    """
        )
    )
    # create the statetype (without the ABORTED label)
    state_type = postgresql.ENUM(
        "NOT_STARTED",
        "PUBLISHED",
        "PENDING",
        "RUNNING",
        "SUCCESS",
        "FAILED",
        name="statetype",
    )
    state_type.create(op.get_bind())
    # update all the columns, trigger depending on it
    # The trigger must be dropped first: it depends on the column type.
    # Defaults are dropped/re-set around the type change because a default
    # of the old type would block the ALTER.
    op.execute(
        sa.DDL(
            f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
ALTER TABLE comp_tasks ALTER COLUMN state DROP DEFAULT;
ALTER TABLE comp_tasks ALTER COLUMN state TYPE statetype USING state::text::statetype;
ALTER TABLE comp_tasks ALTER COLUMN state SET DEFAULT 'NOT_STARTED';

ALTER TABLE comp_pipeline ALTER COLUMN state DROP DEFAULT;
ALTER TABLE comp_pipeline ALTER COLUMN state TYPE statetype USING state::text::statetype;
ALTER TABLE comp_pipeline ALTER COLUMN state SET DEFAULT 'NOT_STARTED';
DROP TYPE statetype_old;
    """
        )
    )
    # Recreate the trigger that notifies on output/state changes.
    op.execute(
        sa.DDL(
            f"""
DROP TRIGGER IF EXISTS {DB_TRIGGER_NAME} on comp_tasks;
CREATE TRIGGER {DB_TRIGGER_NAME}
AFTER UPDATE OF outputs,state ON comp_tasks
    FOR EACH ROW
    WHEN ((OLD.outputs::jsonb IS DISTINCT FROM NEW.outputs::jsonb OR OLD.state IS DISTINCT FROM NEW.state)
        AND NEW.node_class <> 'FRONTEND')
    EXECUTE PROCEDURE {DB_PROCEDURE_NAME}();
"""
        )
    )
def downgrade():
    """Revert the cluster trigger/function and restore the *_access column
    names on cluster_to_groups."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Remove the new trigger/function pair.
    op.execute(
        sa.DDL("DROP TRIGGER IF EXISTS cluster_modification on clusters;"))
    op.execute(sa.DDL("DROP FUNCTION set_cluster_to_owner_group()"))
    # Restore the original *_access column names.
    for short_name in ("read", "write", "delete"):
        op.alter_column("cluster_to_groups",
                        short_name,
                        new_column_name=f"{short_name}_access")
    # ### end Alembic commands ###
    # Reinstall the previous procedure and trigger (module-level DDL).
    op.execute(assign_cluster_access_rights_to_owner_group_procedure_old)
    op.execute(new_cluster_trigger)
Esempio n. 19
0
    def create_triggers(self,
                        schema: str,
                        tables: Optional[List[str]] = None) -> None:
        """Create a database triggers.

        Installs the trigger function for the schema-qualified materialized
        view, then adds a row-level notify trigger and a statement-level
        truncate trigger on every table in the schema.

        :param schema: schema to scan for tables.
        :param tables: optional allow-list; tables not listed are skipped.
        """
        self.execute(
            CREATE_TRIGGER_TEMPLATE.replace(MATERIALIZED_VIEW,
                                            f"{schema}.{MATERIALIZED_VIEW}"))
        # Views must not receive triggers; collect their names to skip them.
        views = sa.inspect(self.engine).get_view_names(schema)
        queries = []
        for table in self.tables(schema):
            schema, table = self._get_schema(schema, table)
            if (tables and table not in tables) or (table in views):
                continue
            logger.debug(f"Creating trigger on table: {schema}.{table}")
            for name, for_each, tg_op in [
                ("notify", "ROW", ["INSERT", "UPDATE", "DELETE"]),
                ("truncate", "STATEMENT", ["TRUNCATE"]),
            ]:
                # NOTE(review): this drop runs inside the inner loop, so it
                # executes twice per table — looks redundant; confirm whether
                # it should be hoisted above this loop.
                self.drop_triggers(schema, [table])
                queries.append(
                    sa.DDL(
                        f'CREATE TRIGGER "{table}_{name}" '
                        f'AFTER {" OR ".join(tg_op)} ON "{schema}"."{table}" '
                        f"FOR EACH {for_each} EXECUTE PROCEDURE "
                        f"{TRIGGER_FUNC}()", ))

        # Creation is deferred until all drops have completed.
        for query in queries:
            self.execute(query)
def downgrade():
    """Revert projects.prj_owner from a users.id foreign key back to the
    owner's email string, and drop the access_rights column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint("fk_projects_prj_owner_users",
                       "projects",
                       type_="foreignkey")
    # Temporary SQL function mapping a user id to that user's email; used
    # only for the column type conversion below.
    set_prj_owner_conversion_fct = sa.DDL(f"""
CREATE OR REPLACE FUNCTION convert_prj_owner_fct(current BIGINT)
  RETURNS varchar AS
$BODY$
SELECT email FROM users WHERE "id" = current

$BODY$
  LANGUAGE 'sql' IMMUTABLE STRICT;
        """)
    op.execute(set_prj_owner_conversion_fct)
    op.alter_column(
        "projects",
        "prj_owner",
        type_=sa.String,
        nullable=False,
        postgresql_using="convert_prj_owner_fct(prj_owner)",
        # postgresql_using="prj_owner::bigint",
    )
    op.execute("DROP FUNCTION convert_prj_owner_fct(current BIGINT)")
    # NOTE(review): this second alter_column repeats type_=sa.String on the
    # already-converted column — looks redundant; confirm intended.
    op.alter_column("projects", "prj_owner", type_=sa.String)
    op.alter_column(
        "projects",
        "last_change_date",
        server_default=None,
        # onupdate=None,
    )
    op.alter_column("projects", "creation_date", server_default=None)
    op.drop_column("projects", "access_rights")
Esempio n. 21
0
def upgrade():
    """Add descriptive columns and a full-text search_vector (with trigger
    and GIN index) to the domain table."""
    op.add_column('domain',
                  sa.Column('title', sa.Unicode(length=250), nullable=True))
    op.add_column(
        'domain',
        sa.Column('legal_title', sa.Unicode(length=250), nullable=True))
    op.add_column('domain',
                  sa.Column('description', sa.UnicodeText(), nullable=True))
    op.add_column('domain',
                  sa.Column('logo_url', sa.Unicode(length=250), nullable=True))
    op.add_column(
        'domain',
        sa.Column('search_vector', postgresql.TSVECTOR(), nullable=True))
    # Trigger keeps search_vector current on insert/update; the final UPDATE
    # backfills existing rows with the same expression.
    op.execute(
        sa.DDL('''
        CREATE FUNCTION domain_search_vector_update() RETURNS TRIGGER AS $$
        BEGIN
            NEW.search_vector = to_tsvector('english', COALESCE(NEW.name, '') || ' ' || COALESCE(NEW.title, '') || ' ' || COALESCE(NEW.legal_title, '') || ' ' || COALESCE(NEW.description, ''));
            RETURN NEW;
        END
        $$ LANGUAGE 'plpgsql';

        CREATE TRIGGER domain_search_vector_trigger BEFORE INSERT OR UPDATE ON domain
        FOR EACH ROW EXECUTE PROCEDURE domain_search_vector_update();

        CREATE INDEX ix_domain_search_vector ON domain USING gin(search_vector);

        UPDATE domain SET search_vector = to_tsvector('english', COALESCE(name, '') || ' ' || COALESCE(title, '') || ' ' || COALESCE(legal_title, '') || ' ' || COALESCE(description, ''));
        '''))
def upgrade():
    """Add cointoss/crosstoss flags to job_view_session and backfill them
    from matching job_impression rows."""
    # Create both flags NOT NULL via a temporary '0' server default, which
    # is removed once existing rows are populated.
    for column in ('cointoss', 'crosstoss'):
        op.add_column(
            'job_view_session',
            sa.Column(column, sa.Boolean(), nullable=False,
                      server_default='0'),
        )
    for column in ('cointoss', 'crosstoss'):
        op.alter_column('job_view_session', column, server_default=None)
    # Update the cointoss and crosstoss columns for existing data
    op.execute(
        sa.DDL('''
        UPDATE job_view_session SET cointoss=jvs.cointoss, crosstoss=jvs.crosstoss FROM (
            SELECT job_view_session.event_session_id, job_view_session.jobpost_id, TRUE as cointoss,
                CASE WHEN job_impression.bgroup != job_view_session.bgroup AND job_view_session.bgroup IS NOT NULL
                    THEN TRUE ELSE FALSE END AS crosstoss
            FROM job_view_session, job_impression
            WHERE job_view_session.event_session_id = job_impression.event_session_id
                AND job_view_session.jobpost_id = job_impression.jobpost_id
        ) AS jvs
        WHERE job_view_session.event_session_id = jvs.event_session_id
            AND job_view_session.jobpost_id = jvs.jobpost_id;
        '''))
Esempio n. 23
0
def downgrade():
    """Restore proposal.preview_video from video_source/video_id and revert
    the search-vector trigger to the variant that indexes preview_video."""
    op.add_column('proposal',
                  sa.Column('preview_video', sa.UnicodeText(), nullable=True))

    # Rebuild preview_video URLs row by row via the module-level helper.
    conn = op.get_bind()
    proposals = conn.execute(proposal.select().where(
        proposal.c.video_id.isnot(None)))
    for prop in proposals:
        conn.execute(
            sa.update(proposal).where(proposal.c.id == prop['id']).values(
                preview_video=make_video_url(prop['video_source'],
                                             prop['video_id'])))

    # Clear the structured video fields once the URL has been restored.
    conn.execute(
        sa.update(proposal).where(proposal.c.preview_video.isnot(None)).values(
            video_source=None, video_id=None))

    # Recompute existing vectors, then swap the trigger back to the version
    # that includes preview_video (weight 'C').
    op.execute(
        sa.DDL(
            dedent('''
        UPDATE proposal SET search_vector = setweight(to_tsvector('english', COALESCE(title, '')), 'A') || setweight(to_tsvector('english', COALESCE(abstract_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(outline_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(requirements_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(slides, '')), 'B') || setweight(to_tsvector('english', COALESCE(preview_video, '')), 'C') || setweight(to_tsvector('english', COALESCE(links, '')), 'B') || setweight(to_tsvector('english', COALESCE(bio_text, '')), 'B');

        DROP TRIGGER proposal_search_vector_trigger ON proposal;
        DROP FUNCTION proposal_search_vector_update();

        CREATE FUNCTION proposal_search_vector_update() RETURNS trigger AS $$
        BEGIN
            NEW.search_vector := setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'A') || setweight(to_tsvector('english', COALESCE(NEW.abstract_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.outline_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.requirements_text, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.slides, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.preview_video, '')), 'C') || setweight(to_tsvector('english', COALESCE(NEW.links, '')), 'B') || setweight(to_tsvector('english', COALESCE(NEW.bio_text, '')), 'B');
            RETURN NEW;
        END
        $$ LANGUAGE plpgsql;

        CREATE TRIGGER proposal_search_vector_trigger BEFORE INSERT OR UPDATE ON proposal
        FOR EACH ROW EXECUTE PROCEDURE proposal_search_vector_update();
                ''')))
Esempio n. 24
0
    def create_triggers(self, schema, tables=None):
        """Install notify (row-level) and truncate (statement-level) triggers
        on every eligible table in the schema.

        Tables outside the optional allow-list, and views, are skipped.
        """
        self.execute(CREATE_TRIGGER_TEMPLATE)
        # Views must not receive triggers; collect their names up front.
        views = sa.inspect(self.engine).get_view_names(schema)
        trigger_specs = (
            ('notify', 'ROW', ['INSERT', 'UPDATE', 'DELETE']),
            ('truncate', 'STATEMENT', ['TRUNCATE']),
        )
        pending = []
        for table in self.tables(schema):
            schema, table = self._get_schema(schema, table)
            if (tables and table not in tables) or (table in views):
                continue
            logger.debug(f'Creating trigger on table: {schema}.{table}')
            for name, for_each, tg_op in trigger_specs:
                pending.append(
                    sa.DDL(
                        f'CREATE TRIGGER {table}_{name} '
                        f'AFTER {" OR ".join(tg_op)} ON "{schema}"."{table}" '
                        f'FOR EACH {for_each} EXECUTE PROCEDURE '
                        f'{TRIGGER_FUNC}()',
                    )
                )

        # Execute the accumulated DDL once all tables have been scanned.
        for ddl in pending:
            self.execute(ddl)
Esempio n. 25
0
def upgrade():
    """Add a full-text ``search_vector`` column to ``jobpost``, a trigger
    that keeps it current, a GIN index for querying it, and a backfill of
    all existing rows."""
    op.add_column('jobpost', sa.Column('search_vector', postgresql.TSVECTOR(), nullable=True))
    # Single DDL batch: trigger function + trigger + index + backfill.
    # NOTE(review): the UPDATE branch only recomputes the vector when
    # headline/headlineb/description/perks change — a company_name-only
    # update leaves the vector stale; confirm that is intended.
    op.execute(sa.DDL(
        '''
        CREATE FUNCTION jobpost_search_vector_update() RETURNS TRIGGER AS $$
        BEGIN
            IF TG_OP = 'INSERT' THEN
                NEW.search_vector = to_tsvector('english', COALESCE(NEW.company_name, '') || ' ' || COALESCE(NEW.headline, '') || ' ' || COALESCE(NEW.headlineb, '') || ' ' || COALESCE(NEW.description, '') || ' ' || COALESCE(NEW.perks, ''));
            END IF;
            IF TG_OP = 'UPDATE' THEN
                IF NEW.headline <> OLD.headline OR COALESCE(NEW.headlineb, '') <> COALESCE(OLD.headlineb, '') OR NEW.description <> OLD.description OR NEW.perks <> OLD.perks THEN
                    NEW.search_vector = to_tsvector('english', COALESCE(NEW.company_name, '') || ' ' || COALESCE(NEW.headline, '') || ' ' || COALESCE(NEW.headlineb, '') || ' ' || COALESCE(NEW.description, '') || ' ' || COALESCE(NEW.perks, ''));
                END IF;
            END IF;
            RETURN NEW;
        END
        $$ LANGUAGE 'plpgsql';

        CREATE TRIGGER jobpost_search_vector_trigger BEFORE INSERT OR UPDATE ON jobpost
        FOR EACH ROW EXECUTE PROCEDURE jobpost_search_vector_update();

        CREATE INDEX ix_jobpost_search_vector ON jobpost USING gin(search_vector);

        UPDATE jobpost SET search_vector = to_tsvector('english', COALESCE(company_name, '') || ' ' || COALESCE(headline, '') || ' ' || COALESCE(headlineb, '') || ' ' || COALESCE(description, '') || ' ' || COALESCE(perks, ''));
        '''))
def upgrade():
    """Rewrite each bare ``"state": "..."`` value inside ``workbench`` into a
    ``{"currentStatus": ...}`` object, for rows whose workbench mentions a
    state."""
    # Regex rewrite is done in text form, then cast back to json.
    migration = sa.DDL("""
UPDATE projects
    SET workbench = (regexp_replace(workbench::text, '"state": ("[^"]*")', '"state": {"currentStatus": \\1}', 'g'))::json
    WHERE workbench::text LIKE '%%state%%'
        """)
    op.execute(migration)
def downgrade():
    """Make ``projects.thumbnail`` and ``projects.description`` NOT NULL
    again, backfilling NULLs with the empty string first."""
    # Same two-step fix for each column: backfill NULLs, then tighten the
    # constraint. Order matches the original migration (thumbnail first).
    for column, backfill in (
        ("thumbnail",
         "UPDATE projects SET thumbnail = '' WHERE thumbnail IS NULL"),
        ("description",
         "UPDATE projects SET description = '' WHERE description IS NULL"),
    ):
        op.execute(sa.DDL(backfill))
        op.alter_column("projects",
                        column,
                        existing_type=sa.VARCHAR(),
                        nullable=False)
# Example 28 (scraped code-sample separator)
def downgrade():
    """Remove the ``project_venue_primary`` table along with its validation
    trigger and trigger function."""
    # The trigger and its function must go before the table they guard.
    cleanup = sa.DDL('''
        DROP TRIGGER project_venue_primary_trigger ON project_venue_primary;
        DROP FUNCTION project_venue_primary_validate();
    ''')
    op.execute(cleanup)
    op.drop_table('project_venue_primary')
def downgrade():
    """Restore the boolean ``is_revoked`` flag on ``user_notification`` and
    drop the ``revoked_at`` timestamp it replaced."""
    table = 'user_notification'

    # Recreate the flag column; a FALSE server default keeps existing rows
    # valid under NOT NULL while the column is added.
    revoked_flag = sa.Column(
        'is_revoked',
        sa.BOOLEAN(),
        autoincrement=False,
        nullable=False,
        server_default=sa.sql.expression.false(),
    )
    op.add_column(table, revoked_flag)

    # Backfill: a row with a revocation timestamp counts as revoked.
    op.execute(
        sa.DDL('UPDATE user_notification SET is_revoked = TRUE WHERE revoked_at IS NOT NULL;'))

    # The temporary server default is no longer needed once rows are set.
    op.alter_column(table, 'is_revoked', server_default=None)

    # Swap the supporting indexes, then drop the old column.
    op.create_index(
        'ix_user_notification_is_revoked',
        table,
        ['is_revoked'],
        unique=False,
    )
    op.drop_index(op.f('ix_user_notification_revoked_at'), table_name=table)
    op.drop_column(table, 'revoked_at')
def upgrade():
    """Re-key ``contact_exchange`` on (user_id, participant_id) and drop the
    ``project_id`` column."""
    # Remove duplicate entries before dropping the project_id column: keep
    # only the oldest row per (user_id, participant_id) pair.
    dedupe = sa.DDL(
        '''
        DELETE FROM contact_exchange WHERE (user_id, project_id, participant_id) IN (
            SELECT user_id, project_id, participant_id
            FROM (SELECT user_id, project_id, participant_id, row_number()
                OVER (partition by user_id, participant_id ORDER BY created_at)
                FROM contact_exchange) AS duplicates
            WHERE row_number != 1);
    '''
    )
    op.execute(dedupe)

    # Index by participant id
    op.create_index(
        op.f('ix_contact_exchange_participant_id'),
        'contact_exchange',
        ['participant_id'],
        unique=False,
    )

    # Rebuild the primary key without project_id.
    op.drop_constraint('contact_exchange_pkey', 'contact_exchange', type_='primary')
    op.create_primary_key(
        'contact_exchange_pkey', 'contact_exchange', ['user_id', 'participant_id']
    )

    # Finally, drop the project_id foreign key constraint, then the column.
    op.drop_constraint(
        'contact_exchange_project_id_fkey', 'contact_exchange', type_='foreignkey'
    )
    op.drop_column('contact_exchange', 'project_id')