def downgrade(migrate_engine):
    """Convert volumes.attach_time back to a String(255) column.

    Creates a temporary 'attachtime_string' column, copies each row's
    attach_time value into it, then swaps the columns by renaming and drops
    the old one.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    attach_string = Column('attachtime_string', String(255))
    attach_string.create(volumes)

    old_attachtime = volumes.c.attach_time

    try:
        volumes_list = list(volumes.select().execute())
        for v in volumes_list:
            # Fetch the scalar value; the original passed the unexecuted
            # select() object straight into values(), never the row's value.
            attach_time = select([volumes.c.attach_time],
                volumes.c.id == v['id']).execute().fetchone()[0]
            # The keyword must match the new column's name
            # ('attachtime_string'); the original used 'attach_string',
            # which is only the local variable name.
            volumes.update().\
                where(volumes.c.id == v['id']).\
                values(attachtime_string=attach_time).execute()
    except Exception:
        # Roll back the half-finished migration.  The original referenced an
        # undefined name (attach_datetime) here, masking the real error.
        attach_string.drop()
        raise

    old_attachtime.alter(name='attach_time_old')
    attach_string.alter(name='attach_time')
    old_attachtime.drop()
def upgrade(migrate_engine):
    """Add a nullable user_id column (FK to galaxy_user.id) to the job table.

    SQLite cannot add a column with a foreign key via ALTER TABLE, so on
    SQLite the column is created without the FK and without an index.

    Modernized to Python 3 compatible syntax (print() call,
    "except ... as e") to match the rest of the file.
    """
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    try:
        Job_table = Table("job", metadata, autoload=True)
    except NoSuchTableError:
        Job_table = None
        log.debug("Failed loading table job")
    if Job_table is not None:
        if migrate_engine.name != 'sqlite':
            try:
                col = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True)
                col.create(Job_table, index_name='ix_job_user_id')
                assert col is Job_table.c.user_id
            except Exception as e:
                log.debug("Adding column 'user_id' to job table failed: %s" % (str(e)))
        else:
            try:
                col = Column("user_id", Integer, nullable=True)
                col.create(Job_table)
                assert col is Job_table.c.user_id
            except Exception as e:
                log.debug("Adding column 'user_id' to job table failed: %s" % (str(e)))
def upgrade(migrate_engine):
    """Replace block_device_mapping.instance_id with instance_uuid.

    Adds an instance_uuid column, populates it from instances.uuid via a
    correlated subquery, drops the old FK constraint when one exists, and
    finally drops the integer instance_id column.

    The unused local `dialect` from the original has been removed.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    block_device_mapping = Table('block_device_mapping', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    uuid_column = Column('instance_uuid', String(36))
    uuid_column.create(block_device_mapping)

    try:
        block_device_mapping.update().values(
            instance_uuid=select(
                [instances.c.uuid],
                instances.c.id == block_device_mapping.c.instance_id)
        ).execute()
    except Exception:
        # Undo the column add so the migration can be retried cleanly.
        uuid_column.drop()
        raise

    fkeys = list(block_device_mapping.c.instance_id.foreign_keys)
    if fkeys:
        try:
            fkey_name = fkeys[0].constraint.name
            ForeignKeyConstraint(
                columns=[block_device_mapping.c.instance_id],
                refcolumns=[instances.c.id],
                name=fkey_name).drop()
        except Exception:
            LOG.error(_("foreign key constraint couldn't be removed"))
            raise

    block_device_mapping.c.instance_id.drop()
def upgrade(migrate_engine):
    """Add installed_changeset_revision to tool_shed_repository and seed it.

    Each row's installed_changeset_revision is initialized from that row's
    current changeset_revision value.
    """
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    ToolShedRepository_table = Table("tool_shed_repository", metadata, autoload=True)
    col = Column("installed_changeset_revision", TrimmedString(255))
    try:
        col.create(ToolShedRepository_table)
        assert col is ToolShedRepository_table.c.installed_changeset_revision
    except Exception:
        log.exception("Adding installed_changeset_revision column to the tool_shed_repository table failed.")
    # Update each row by setting the value of installed_changeset_revision to be the value of changeset_revision.
    # This will be problematic if the value of changeset_revision was updated to something other than the value
    # that it was when the repository was installed (because the install path determined in real time will attempt to
    # find the repository using the updated changeset_revision instead of the required installed_changeset_revision),
    # but at the time this script was written, this scenario is extremely unlikely.
    cmd = "SELECT id AS id, " \
        + "installed_changeset_revision AS installed_changeset_revision, " \
        + "changeset_revision AS changeset_revision " \
        + "FROM tool_shed_repository;"
    tool_shed_repositories = migrate_engine.execute(cmd).fetchall()
    update_count = 0
    for row in tool_shed_repositories:
        # Target the row by primary key.  The original filtered on
        # changeset_revision = its own value, which also updates any other
        # rows that happen to share the same changeset_revision.
        cmd = "UPDATE tool_shed_repository " \
            + "SET installed_changeset_revision = '%s' " % row.changeset_revision \
            + "WHERE id = %d;" % int(row.id)
        migrate_engine.execute(cmd)
        update_count += 1
    print("Updated the installed_changeset_revision column for ", update_count, " rows in the tool_shed_repository table.  ")
def upgrade(migrate_engine):
    """Re-create request.notification as a JSONType column and seed it.

    Every row's notification is reset to an empty notification structure;
    the previous 'notify' value is intentionally discarded.  The old column
    is then dropped for every backend except SQLite, which cannot drop
    columns.
    """
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    try:
        Request_table = Table("request", metadata, autoload=True)
    except NoSuchTableError:
        Request_table = None
        log.debug("Failed loading table 'request'")

    if Request_table is not None:
        # create the column again as JSONType
        try:
            col = Column("notification", JSONType())
            col.create(Request_table)
            assert col is Request_table.c.notification
        except Exception:
            log.exception("Creating column 'notification' in the 'request' table failed.")

        cmd = "SELECT id, user_id, notify FROM request"
        result = migrate_engine.execute(cmd)
        for r in result:
            id = int(r[0])
            # NOTE(review): user_id and notify are selected but never used —
            # every row receives the same empty notification dict.
            notify_new = dict(email=[], sample_states=[], body='', subject='')
            cmd = "UPDATE request SET notification='%s' WHERE id=%i" % (dumps(notify_new), id)
            migrate_engine.execute(cmd)

        # remove the 'notify' column for non-sqlite databases.
        if migrate_engine.name != 'sqlite':
            try:
                Request_table.c.notify.drop()
            except Exception:
                log.exception("Deleting column 'notify' from the 'request' table failed.")
def upgrade(migrate_engine):
    """Add a designate_recordset_id UUID column to the records table."""
    meta.bind = migrate_engine

    records = Table('records', meta, autoload=True)

    recordset_id_col = Column('designate_recordset_id', UUID())
    recordset_id_col.create(records)
def downgrade(migrate_engine):
    """Revert the repository_metadata test-tracking columns.

    Drops missing_test_components and tool_test_results, then re-creates the
    older tool_test_errors JSON column.
    """
    metadata.bind = migrate_engine
    metadata.reflect()
    repository_metadata_table = Table("repository_metadata", metadata, autoload=True)

    # Remove the two newer test-tracking columns.
    try:
        repository_metadata_table.c.missing_test_components.drop()
    except Exception:
        log.exception("Dropping column missing_test_components from the repository_metadata table failed.")
    try:
        repository_metadata_table.c.tool_test_results.drop()
    except Exception:
        log.exception("Dropping column tool_test_results from the repository_metadata table failed.")

    # Restore the older tool_test_errors JSON column.
    tool_test_errors_col = Column("tool_test_errors", JSONType, nullable=True)
    try:
        tool_test_errors_col.create(repository_metadata_table)
        assert tool_test_errors_col is repository_metadata_table.c.tool_test_errors
    except Exception:
        log.exception("Adding tool_test_errors column to the repository_metadata table failed.")
def downgrade(migrate_engine):
    """Merge root_gb/ephemeral_gb back into a single local_gb column.

    The value copied into local_gb depends on the configured driver, since
    drivers historically interpreted local_gb differently.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instance_types = Table('instance_types', meta, autoload=True)

    for table in (instances, instance_types):
        local_gb = Column('local_gb', Integer)
        local_gb.create(table)

    try:
        for table in (instances, instance_types):
            # libvirt treated local_gb as the ephemeral disk; other drivers
            # treated it as the root disk.
            if FLAGS.connection_type == 'libvirt':
                column = table.c.ephemeral_gb
            else:
                column = table.c.root_gb
            table.update().values(local_gb=column).execute()
    except Exception:
        # Roll back the added columns on both tables before re-raising.
        for table in (instances, instance_types):
            table.drop_column('local_gb')
        raise

    default_ephemeral_device = instances.c.default_ephemeral_device
    default_ephemeral_device.alter(name='default_local_device')

    # Only after a successful copy are the split columns removed.
    for table in (instances, instance_types):
        table.drop_column('root_gb')
        table.drop_column('ephemeral_gb')
# Example #9
# 0
 def upgrade_2_DDL(self):
     """Add min_voltage and max_voltage Integer columns (default 0) to node."""
     node_table = Table('node', self._metadata)

     for column_name in ('min_voltage', 'max_voltage'):
         Column(column_name, Integer, default=0).create(node_table)
def upgrade(migrate_engine):
    """Add an inherit_ttl flag to the records table and semi-populate it.

    Records carrying an explicit TTL are marked inherit_ttl=False.  Records
    without a TTL require a manual cross-database post-migration step, which
    is logged as a warning.
    """
    meta.bind = migrate_engine

    records_table = Table('records', meta, autoload=True)

    # Create the new inherit_ttl column
    inherit_ttl = Column('inherit_ttl', Boolean(), default=True)
    inherit_ttl.create(records_table)

    # Semi-Populate the new inherit_ttl column. We'll need to do a cross-db
    # join from powerdns.records -> powerdns.domains -> designate.domains, so
    # we can't perform the second half here.
    # NOTE: "!= None" is required (not "is not None") so SQLAlchemy emits
    # "ttl IS NOT NULL".
    query = records_table.update().values(inherit_ttl=False)
    query = query.where(records_table.c.ttl != None)
    query.execute()

    # If there are records without an explicitly configured TTL, we'll need
    # a manual post-migration step.
    query = records_table.select()
    query = query.where(records_table.c.ttl == None)
    c = query.count()

    if c > 0:
        pmq = ('UPDATE powerdns.records JOIN powerdns.domains ON powerdns.reco'
               'rds.domain_id = powerdns.domains.id JOIN designate.domains ON '
               'powerdns.domains.designate_id = designate.domains.id SET power'
               'dns.records.ttl = designate.domains.ttl WHERE powerdns.records'
               '.inherit_ttl = 1;')

        LOG.warning(_LW('**** A manual post-migration step is required ****'))
        # Pass the query as a lazy argument: interpolating with % before the
        # _LW() call breaks the translation lookup, because the interpolated
        # string no longer matches the message catalog entry.
        LOG.warning(_LW('Please issue this query: %s'), pmq)
def upgrade(migrate_engine):
    """Split local_gb into root_gb and ephemeral_gb on instances/instance_types.

    Population of the new columns depends on the configured driver, because
    drivers historically interpreted local_gb differently; the actual data
    copy is delegated to upgrade_libvirt()/upgrade_other().

    Raises:
        exception.Error: if instance records exist but no connection_type
            is configured (the driver determines how to interpret local_gb).
    """
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    data_present = check_instance_presence(migrate_engine, instances)

    if data_present and not FLAGS.connection_type:
        # Message fixed: the original read "run migration migration".
        msg = ("Found instance records in database. You must specify "
               "connection_type to run this migration")
        raise exception.Error(msg)

    instance_types = Table('instance_types', meta, autoload=True)

    for table in (instances, instance_types):
        root_gb = Column('root_gb', Integer)
        root_gb.create(table)
        ephemeral_gb = Column('ephemeral_gb', Integer)
        ephemeral_gb.create(table)

    # Since this migration is part of the work to get all drivers
    # working the same way, we need to treat the new root_gb and
    # ephemeral_gb columns differently depending on what the
    # driver implementation used to behave like.
    if FLAGS.connection_type == 'libvirt':
        upgrade_libvirt(instances, instance_types)
    else:
        upgrade_other(instances, instance_types)

    default_local_device = instances.c.default_local_device
    default_local_device.alter(name='default_ephemeral_device')

    for table in (instances, instance_types):
        table.drop_column('local_gb')
def upgrade(migrate_engine):
    """Define the instance table schema and add an allow_propose flag.

    The new Boolean column defaults to True and is explicitly backfilled to
    True for all existing rows.
    """
    meta.bind = migrate_engine
    instance_table = Table(
        "instance",
        meta,
        Column("id", Integer, primary_key=True),
        Column("key", Unicode(20), nullable=False, unique=True),
        Column("label", Unicode(255), nullable=False),
        Column("description", UnicodeText(), nullable=True),
        Column("required_majority", Float, nullable=False),
        Column("activation_delay", Integer, nullable=False),
        Column("create_time", DateTime, default=func.now()),
        Column("access_time", DateTime, default=func.now(), onupdate=func.now()),
        Column("delete_time", DateTime, nullable=True),
        Column("creator_id", Integer, ForeignKey("user.id"), nullable=False),
        Column("default_group_id", Integer, ForeignKey("group.id"), nullable=True),
        Column("allow_adopt", Boolean, default=True),
        Column("allow_delegate", Boolean, default=True),
        Column("allow_index", Boolean, default=True),
        Column("hidden", Boolean, default=False),
        Column("locale", Unicode(7), nullable=True),
        Column("css", UnicodeText(), nullable=True),
        Column("use_norms", Boolean, nullable=True, default=True),
    )

    allow_propose_col = Column("allow_propose", Boolean, default=True)
    allow_propose_col.create(instance_table)
    backfill = instance_table.update(values={"allow_propose": True})
    migrate_engine.execute(backfill)
# Example #13
# 0
def upgrade(migrate_engine):
    """Create the milestone table and wire it into delegateable and instance.

    Adds delegateable.milestone_id (FK to milestone.id) and an
    instance.milestones Boolean flag, backfilled to False for existing rows.
    """
    meta.bind = migrate_engine

    user_table = Table('user', meta, autoload=True)
    instance_table = Table('instance', meta, autoload=True)
    delegateable_table = Table('delegateable', meta, autoload=True)

    milestone_table = Table(
        'milestone', meta,
        Column('id', Integer, primary_key=True),
        Column('instance_id', Integer, ForeignKey('instance.id'), nullable=False),
        Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
        Column('title', Unicode(255), nullable=True),
        Column('text', UnicodeText(), nullable=True),
        Column('time', DateTime),
        Column('create_time', DateTime, default=datetime.utcnow),
        Column('delete_time', DateTime))
    milestone_table.create()

    milestone_fk = Column('milestone_id', Integer, ForeignKey('milestone.id'), nullable=True)
    milestone_fk.create(delegateable_table)

    milestones_flag = Column('milestones', Boolean, default=False)
    milestones_flag.create(instance_table)
    migrate_engine.execute(instance_table.update(values={'milestones': False}))
def upgrade(migrate_engine):
    """Add a purged flag to library_dataset and backfill it.

    purged defaults to False for every row; it is then set True for each
    deleted LibraryDataset that has no remaining active
    LibraryDatasetDatasetAssociations.
    """
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    try:
        library_dataset_table = Table("library_dataset", metadata, autoload=True)
        purged_col = Column("purged", Boolean, index=True, default=False)
        purged_col.create(library_dataset_table, index_name='ix_library_dataset_purged')
        assert purged_col is library_dataset_table.c.purged
    except Exception:
        log.exception("Adding purged column to library_dataset table failed.")

    # Default every existing row to not-purged.
    cmd = "UPDATE library_dataset SET purged = %s;" % engine_false(migrate_engine)
    try:
        migrate_engine.execute(cmd)
    except Exception:
        log.exception("Setting default data for library_dataset.purged column failed.")

    # A deleted LibraryDataset with no active LibraryDatasetDatasetAssociations
    # is considered purged.
    cmd = "SELECT * FROM library_dataset WHERE deleted = %s;" % engine_true(migrate_engine)
    for row in migrate_engine.execute(cmd).fetchall():
        lds_id = int(row.id)
        cmd = "SELECT * FROM library_dataset_dataset_association WHERE library_dataset_id = %d AND library_dataset_dataset_association.deleted = %s;" % (lds_id, engine_false(migrate_engine))
        if not migrate_engine.execute(cmd).fetchall():
            print("Updating purged column to True for LibraryDataset id : ", lds_id)
            cmd = "UPDATE library_dataset SET purged = %s WHERE id = %d;" % (engine_true(migrate_engine), lds_id)
            migrate_engine.execute(cmd)
def upgrade(migrate_engine):
    """Switch instance_actions from instance_id to instance_uuid.

    Adds the uuid column, fills it from instances.uuid, drops the old FK
    constraint (non-SQLite only) and removes the integer column.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    instance_actions = Table('instance_actions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    new_uuid_col = Column('instance_uuid', String(36))
    new_uuid_col.create(instance_actions)

    try:
        correlated = select([instances.c.uuid],
                            instances.c.id == instance_actions.c.instance_id)
        instance_actions.update().values(instance_uuid=correlated).execute()
    except Exception:
        # Roll back the column add so the migration can be retried.
        new_uuid_col.drop()
        raise

    if not dialect.startswith('sqlite'):
        foreign_keys = list(instance_actions.c.instance_id.foreign_keys)
        if foreign_keys:
            try:
                constraint_name = foreign_keys[0].constraint.name
                ForeignKeyConstraint(columns=[instance_actions.c.instance_id],
                                     refcolumns=[instances.c.id],
                                     name=constraint_name).drop()
            except Exception:
                LOG.error(_("foreign key constraint couldn't be removed"))
                raise

    instance_actions.c.instance_id.drop()
def downgrade(migrate_engine):
    """Revert volumes from instance_uuid back to an integer instance_id.

    Adds the integer column, fills it from instances.id via a correlated
    subquery on uuid, re-creates the FK constraint when one is recorded,
    then drops the uuid column.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    volumes = Table('volumes', meta, autoload=True)
    instance_id_column = Column('instance_id', Integer)

    instance_id_column.create(volumes)
    try:
        volumes.update().values(
            instance_id=select(
                [instances.c.id],
                instances.c.uuid == volumes.c.instance_uuid)
                ).execute()
    except Exception:
        instance_id_column.drop()
        # Re-raise after rollback.  The original swallowed the failure and
        # continued, dropping instance_uuid with no data copied — every
        # sibling migration in this file re-raises here.
        raise

    fkeys = list(volumes.c.instance_id.foreign_keys)
    if fkeys:
        try:
            fk_name = fkeys[0].constraint.name
            ForeignKeyConstraint(
                columns=[volumes.c.instance_id],
                refcolumns=[instances.c.id],
                name=fk_name).create()
        except Exception:
            LOG.error(_("foreign key could not be created"))
            raise

    volumes.c.instance_uuid.drop()
def upgrade(migrate_engine):
    """Switch instance_actions from instance_id to instance_uuid.

    Adds the uuid column, fills it from instances.uuid, drops the old FK
    constraint (non-SQLite only) and removes the integer column.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name
    instance_actions = _get_table("instance_actions")
    instances = _get_table("instances")
    # Dead code removed: the original first built a Column carrying
    # ForeignKey("instances.uuid") and immediately rebound the name, so only
    # the plain column was ever used.
    uuid_column = Column("instance_uuid", String(36))
    uuid_column.create(instance_actions)

    try:
        instance_actions.update().values(
            instance_uuid=select([instances.c.uuid], instances.c.id == instance_actions.c.instance_id)
        ).execute()
    except Exception:
        # Roll back the column add so the migration can be retried.
        uuid_column.drop()
        raise

    if not dialect.startswith("sqlite"):
        fkeys = list(instance_actions.c.instance_id.foreign_keys)
        if fkeys:
            try:
                fkey_name = fkeys[0].constraint.name
                ForeignKeyConstraint(
                    columns=[instance_actions.c.instance_id], refcolumns=[instances.c.id], name=fkey_name
                ).drop()
            except Exception:
                # Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt are not caught here.
                logging.error(_("foreign key constraint couldn't be removed"))
                raise

    instance_actions.c.instance_id.drop()
def upgrade(migrate_engine):
    """Switch instance_metadata from instance_id to instance_uuid.

    Adds the uuid column, fills it from instances.uuid, drops the old FK
    constraint when one exists, and removes the integer column.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    instance_metadata = Table("instance_metadata", meta, autoload=True)
    instances = Table("instances", meta, autoload=True)

    new_uuid_col = Column("instance_uuid", String(36))
    new_uuid_col.create(instance_metadata)

    try:
        correlated = select([instances.c.uuid],
                            instances.c.id == instance_metadata.c.instance_id)
        instance_metadata.update().values(instance_uuid=correlated).execute()
    except Exception:
        # Roll back the column add so the migration can be retried.
        new_uuid_col.drop()
        raise

    foreign_keys = list(instance_metadata.c.instance_id.foreign_keys)
    if foreign_keys:
        try:
            constraint_name = foreign_keys[0].constraint.name
            ForeignKeyConstraint(columns=[instance_metadata.c.instance_id],
                                 refcolumns=[instances.c.id],
                                 name=constraint_name).drop()
        except Exception:
            LOG.error(_("foreign key constraint couldn't be removed"))
            raise

    instance_metadata.c.instance_id.drop()
# Example #19
# 0
def upgrade(migrate_engine):
    """Create the page and page_revision tables and add galaxy_user.username.

    On MySQL the slug index is stripped from the table definition and created
    manually afterwards so that a key length (mysql_length=200) can be given.
    Relies on module-level Page_table and PageRevision_table definitions.
    """
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    try:
        if migrate_engine.name == 'mysql':
            # Strip slug index prior to creation so we can do it manually.
            slug_index = None
            for ix in Page_table.indexes:
                if ix.name == 'ix_page_slug':
                    slug_index = ix
            # NOTE(review): if no index named ix_page_slug exists this becomes
            # remove(None), which raises and lands in the except below.
            Page_table.indexes.remove(slug_index)
        Page_table.create()
        if migrate_engine.name == 'mysql':
            # Create slug index manually afterward.
            i = Index("ix_page_slug", Page_table.c.slug, mysql_length=200)
            i.create()
    except Exception:
        log.exception("Could not create page table")
    try:
        PageRevision_table.create()
    except Exception:
        log.exception("Could not create page_revision table")

    # Add 1 column to the user table
    User_table = Table("galaxy_user", metadata, autoload=True)
    # NOTE(review): default=False on a String column looks suspicious —
    # presumably "no default" was intended; confirm before changing.
    col = Column('username', String(255), index=True, unique=True, default=False)
    col.create(User_table, index_name='ix_user_username', unique_name='username')
    assert col is User_table.c.username
def upgrade(migrate_engine):
    """Define the instance table schema and add a hide_global_categories flag.

    The new nullable Boolean column defaults to False and is explicitly
    backfilled to False for all existing rows.
    """
    metadata.bind = migrate_engine
    instance_table = Table(
        'instance',
        metadata,
        Column('id', Integer, primary_key=True),
        Column('key', Unicode(20), nullable=False, unique=True),
        Column('label', Unicode(255), nullable=False),
        Column('description', UnicodeText(), nullable=True),
        Column('required_majority', Float, nullable=False),
        Column('activation_delay', Integer, nullable=False),
        Column('create_time', DateTime, default=func.now()),
        Column('access_time', DateTime, default=func.now(), onupdate=func.now()),
        Column('delete_time', DateTime, nullable=True),
        Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
        Column('default_group_id', Integer, ForeignKey('group.id'), nullable=True),
        Column('allow_adopt', Boolean, default=True),
        Column('allow_delegate', Boolean, default=True),
        Column('allow_propose', Boolean, default=True),
        Column('allow_index', Boolean, default=True),
        Column('hidden', Boolean, default=False),
        Column('locale', Unicode(7), nullable=True),
        Column('css', UnicodeText(), nullable=True),
        Column('frozen', Boolean, default=False),
        Column('milestones', Boolean, default=False),
        Column('use_norms', Boolean, nullable=True, default=True),
        Column('require_selection', Boolean, nullable=True, default=False),
        Column('is_authenticated', Boolean, nullable=True, default=False),
    )

    hide_categories_col = Column(
        'hide_global_categories', Boolean, nullable=True, default=False)
    hide_categories_col.create(instance_table)
    backfill = instance_table.update(values={'hide_global_categories': False})
    migrate_engine.execute(backfill)
def upgrade(migrate_engine):
    """Convert volumes.attach_time from String to a DateTime column.

    Creates a temporary 'attachtime_datetime' column, copies each row's
    attach_time into it, then swaps the columns by renaming and drops the
    old one.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    new_datetime_col = Column('attachtime_datetime', DateTime(timezone=False))
    new_datetime_col.create(volumes)

    legacy_col = volumes.c.attach_time

    try:
        for row in list(volumes.select().execute()):
            current_value = select([volumes.c.attach_time],
                volumes.c.id == row['id']).execute().fetchone()[0]
            volumes.update().\
                where(volumes.c.id == row['id']).\
                values(attachtime_datetime=current_value).execute()
    except Exception:
        # Roll back the column add before propagating the failure.
        new_datetime_col.drop()
        raise

    legacy_col.alter(name='attach_time_old')
    new_datetime_col.alter(name='attach_time')
    legacy_col.drop()
def upgrade(migrate_engine):
    """Switch instance_actions from instance_id to instance_uuid.

    Adds the uuid column, fills it from instances.uuid, unconditionally
    drops the old FK constraint, then removes the integer column.
    """
    meta.bind = migrate_engine
    instance_actions = _get_table('instance_actions')
    instances = _get_table('instances')
    # Dead code removed: the original first built a Column carrying
    # ForeignKey('instances.uuid') and immediately rebound the name, so only
    # the plain column was ever used.
    uuid_column = Column('instance_uuid', String(36))
    uuid_column.create(instance_actions)

    try:
        instance_actions.update().values(
            instance_uuid=select(
                [instances.c.uuid],
                instances.c.id == instance_actions.c.instance_id)
        ).execute()
    except Exception:
        # Roll back the column add so the migration can be retried.
        uuid_column.drop()
        raise

    try:
        fkeys = list(instance_actions.c.instance_id.foreign_keys)
        fkey_name = fkeys[0].constraint.name
        ForeignKeyConstraint(columns=[instance_actions.c.instance_id],
                             refcolumns=[instances.c.id],
                             name=fkey_name).drop()
    except Exception:
        logging.error(_("foreign key constraint couldn't be removed"))
        raise

    instance_actions.c.instance_id.drop()
def upgrade(migrate_engine):
    """Switch instance_actions from instance_id to instance_uuid.

    On MySQL the auto-named FK is dropped via raw SQL (best-effort); other
    backends are assumed not to need it.
    """
    meta.bind = migrate_engine
    instance_actions = _get_table('instance_actions')
    instances = _get_table('instances')
    # Dead code removed: the original first built a Column carrying
    # ForeignKey('instances.uuid') and immediately rebound the name, so only
    # the plain column was ever used.
    uuid_column = Column('instance_uuid', String(36))
    uuid_column.create(instance_actions)

    try:
        instance_actions.update().values(
            instance_uuid=select(
                [instances.c.uuid],
                instances.c.id == instance_actions.c.instance_id)
        ).execute()
    except Exception:
        # Roll back the column add so the migration can be retried.
        uuid_column.drop()
        raise

    if migrate_engine.name == "mysql":
        try:
            # MySQL names the implicit FK constraint itself; drop it by its
            # conventional auto-generated name.
            migrate_engine.execute("ALTER TABLE instance_actions " \
                "DROP FOREIGN KEY instance_actions_ibfk_1;")
        except Exception:  # Don't care, just fail silently.
            pass

    instance_actions.c.instance_id.drop()
def upgrade(migrate_engine):
    """Add status and error_message columns to tool_dependency and, except
    on SQLite, drop the old uninstalled column."""
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    tool_dependency_table = Table("tool_dependency", metadata, autoload=True)
    # SQLite cannot add a NOT NULL column to an existing table.
    if migrate_engine.name == 'sqlite':
        status_col = Column("status", TrimmedString(255))
    else:
        status_col = Column("status", TrimmedString(255), nullable=False)
    try:
        status_col.create(tool_dependency_table)
        assert status_col is tool_dependency_table.c.status
    except Exception as e:
        print("Adding status column to the tool_dependency table failed: %s" % str(e))

    error_message_col = Column("error_message", TEXT)
    try:
        error_message_col.create(tool_dependency_table)
        assert error_message_col is tool_dependency_table.c.error_message
    except Exception as e:
        print("Adding error_message column to the tool_dependency table failed: %s" % str(e))

    if migrate_engine.name != 'sqlite':
        # This breaks in sqlite due to failure to drop check constraint.
        # TODO move to alembic.
        try:
            tool_dependency_table.c.uninstalled.drop()
        except Exception as e:
            print("Dropping uninstalled column from the tool_dependency table failed: %s" % str(e))
# Example #25
# 0
def upgrade(migrate_engine):
    """Create the message tables and add an email_messages flag to user."""
    metadata.bind = migrate_engine
    message_table.create()
    message_recipient_table.create()

    email_messages_col = Column('email_messages', Boolean, default=True)
    email_messages_col.create(user_table)
def downgrade(migrate_engine):
    """Convert instances.instance_type_id back to a string column.

    Snapshots every row's integer id as a string, creates the replacement
    string column, copies the values over, then swaps the columns by
    renaming and drops the integer one.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    integer_column = instances.c.instance_type_id
    string_column = Column('instance_type_id_str',
                           String(length=255, convert_unicode=False,
                                  assert_unicode=None, unicode_error=None,
                                  _warn_on_bytestring=False),
                           nullable=True)

    # Snapshot the values before altering the schema.
    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
        else:
            types[instance.id] = str(instance.instance_type_id)

    string_column.create(instances)
    # items() works on both Python 2 and 3; the original's iteritems() is
    # Python 2 only (the rest of this file already uses Python 3 syntax).
    for instance_id, instance_type_id in types.items():
        update = instances.update().\
                where(instances.c.id == instance_id).\
                values(instance_type_id_str=instance_type_id)
        migrate_engine.execute(update)

    integer_column.alter(name='instance_type_id_int')
    string_column.alter(name='instance_type_id')
    integer_column.drop()
# Example #27
# 0
def wants_notifications(db):
    """Add a wants_notifications field to User model"""
    metadata = MetaData(bind=db.bind)
    users_table = inspect_table(metadata, "core__users")
    wants_col = Column('wants_notifications', Boolean, default=True)
    wants_col.create(users_table)
    db.commit()
def upgrade(migrate_engine):
    """Convert instances.instance_type_id from string to an integer column.

    Snapshots every row's value as an int (non-numeric values become None
    with a warning), creates the replacement integer column, copies the
    values over, then swaps the columns by renaming and drops the string one.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)

    # Snapshot the values before altering the schema.
    types = {}
    for instance in migrate_engine.execute(instances.select()):
        if instance.instance_type_id is None:
            types[instance.id] = None
            continue
        try:
            types[instance.id] = int(instance.instance_type_id)
        except ValueError:
            # warning() is the non-deprecated spelling of warn(); lazy
            # %-args defer string formatting to the logging framework.
            LOG.warning("Instance %s did not have instance_type_id "
                        "converted to an integer because its value is %s",
                        instance.id, instance.instance_type_id)
            types[instance.id] = None

    integer_column = Column('instance_type_id_int', Integer(), nullable=True)
    string_column = instances.c.instance_type_id

    integer_column.create(instances)
    # items() works on both Python 2 and 3; the original's iteritems() is
    # Python 2 only (the rest of this file already uses Python 3 syntax).
    for instance_id, instance_type_id in types.items():
        update = instances.update().\
                where(instances.c.id == instance_id).\
                values(instance_type_id_int=instance_type_id)
        migrate_engine.execute(update)

    string_column.alter(name='instance_type_id_str')
    integer_column.alter(name='instance_type_id')
    string_column.drop()
# Example #29
# 0
def upgrade(migrate_engine):
    """Add a show_norms_navigation flag (default True) to the instance table."""
    metadata.bind = migrate_engine

    show_norms_col = Column('show_norms_navigation', Boolean, default=True)
    show_norms_col.create(instance_table)
def upgrade(migrate_engine):
    """Drop the unique constraint on domains.name.

    SQLite cannot drop constraints, so the column is rebuilt there through a
    temporary copy; PostgreSQL and other backends drop the named constraint
    directly.
    """
    meta.bind = migrate_engine
    dialect = migrate_engine.url.get_dialect().name

    domains_table = Table('domains', meta, autoload=True)

    if dialect.startswith('sqlite'):
        # SQLite can't drop a constraint. Yay. This will be fun..
        # Rebuild the column: add a temp copy, drop the original, rename.
        tmp_name_col = Column('name_tmp', String(255))
        tmp_name_col.create(domains_table)

        migrate_engine.execute(
            update(domains_table).values(name_tmp=domains_table.c.name))

        domains_table.c.name.drop()
        domains_table.c.name_tmp.alter(name='name')
    elif dialect.startswith('postgresql'):
        UniqueConstraint('name', name='domains_name_key',
                         table=domains_table).drop()
    else:
        UniqueConstraint('name', name='name', table=domains_table).drop()
def upgrade(migrate_engine):
    """Add a 'submitted' Boolean column (default False) to the request table.

    Modernized to Python 3 compatible syntax ("except ... as e"; the
    original used the Python-2-only "except Exception, e").
    """
    metadata.bind = migrate_engine
    display_migration_details()
    # Load existing tables
    Request_table = Table("request", metadata, autoload=True)
    # Loaded for its reflection side effect; not referenced below.
    Sample_table = Table("sample", metadata, autoload=True)
    metadata.reflect()
    # Add 1 column to the request table
    if Request_table is not None:
        try:
            col = Column('submitted', Boolean, default=False)
            col.create(Request_table)
            assert col is Request_table.c.submitted
        except Exception as e:
            log.debug("Adding column 'submitted' to request table failed: %s" %
                      (str(e)))
def downgrade(migrate_engine):
    """Re-add ``request_type.datatx_info`` and repopulate it.

    Reverses the migration that moved data-transfer settings into the
    sequencer/form tables: recreates the JSON ``datatx_info`` column,
    rebuilds each row's dict from the linked sequencer's form values, then
    drops the ``sequencer_id`` foreign-key column.
    """
    metadata.bind = migrate_engine
    metadata.reflect()

    try:
        RequestType_table = Table("request_type", metadata, autoload=True)
    except NoSuchTableError:
        RequestType_table = None
        log.debug("Failed loading table 'request_type'")
    if RequestType_table is not None:
        # create the 'datatx_info' column
        try:
            col = Column("datatx_info", JSONType())
            col.create(RequestType_table)
            assert col is RequestType_table.c.datatx_info
        except Exception:
            log.exception(
                "Creating column 'datatx_info' in the 'request_type' table failed."
            )
        # restore the datatx_info column data in the request_type table with data from
        # the sequencer and the form_values table
        cmd = "SELECT request_type.id, form_values.content "\
              + " FROM request_type, sequencer, form_values "\
              + " WHERE request_type.sequencer_id=sequencer.id AND sequencer.form_values_id=form_values.id "\
              + " ORDER  BY request_type.id ASC"
        result = migrate_engine.execute(cmd)
        for row in result:
            request_type_id = row[0]
            seq_values = loads(str(row[1]))
            # create the datatx_info json dict
            # NOTE(review): field_0..field_4 are assumed to map to the sequencer
            # form's host/username/password/data_dir/rename_dataset fields —
            # confirm against the original form definition.
            datatx_info = dumps(
                dict(host=seq_values.get('field_0', ''),
                     username=seq_values.get('field_1', ''),
                     password=seq_values.get('field_2', ''),
                     data_dir=seq_values.get('field_3', ''),
                     rename_dataset=seq_values.get('field_4', '')))
            # update the column
            cmd = "UPDATE request_type SET datatx_info='%s' WHERE id=%i" % (
                datatx_info, request_type_id)
            migrate_engine.execute(cmd)
        # delete foreign key field to the sequencer table in the request_type table
        try:
            RequestType_table.c.sequencer_id.drop()
        except Exception:
            log.exception(
                "Deleting column 'sequencer_id' in the 'request_type' table failed."
            )
def upgrade_enterprise_dbs(migrate_engine):
    """Convert boolean ``deleted`` columns to id-typed soft-delete markers."""
    meta = MetaData()
    meta.bind = migrate_engine

    for name in all_tables:
        tbl = Table(name, meta, autoload=True)

        # Replacement column, typed like the table's primary key.
        replacement = Column('new_deleted', tbl.c.id.type,
                             default=get_default_deleted_value(tbl))
        replacement.create(tbl, populate_default=True)

        # A deleted row stores its own id as the marker value.
        # (``== True`` is required: this builds a SQL expression.)
        tbl.update().\
                where(tbl.c.deleted == True).\
                values(new_deleted=tbl.c.id).\
                execute()

        # Swap the replacement into place under the original name.
        tbl.c.deleted.drop()
        tbl.c.new_deleted.alter(name="deleted")
# Example #34
# 0
 def create(cls, *name):
     """Build a DESC ordering descriptor over the given operands.

     A leading string operand is first wrapped via ``Column.create``.
     Returns a dict with ``operator`` and ``operands`` keys.
     """
     name = list(name)
     # NOTE(review): ``basestring`` exists only on Python 2 — confirm runtime.
     if isinstance(name[0], basestring):
         name[0] = Column.create(name[0])
     return {
         "operator": {"name": "DESC"},
         "operands": name
     }
def upgrade(migrate_engine):
    """Add a nullable JSON ``metadata`` column to tool_shed_repository."""
    metadata.bind = migrate_engine
    # Python 3 compatible print/except syntax (was ``print __doc__`` /
    # ``except Exception, e``), matching the Python 3 blocks in this file.
    print(__doc__)
    metadata.reflect()
    ToolShedRepository_table = Table("tool_shed_repository",
                                     metadata,
                                     autoload=True)
    c = Column("metadata", JSONType(), nullable=True)
    try:
        c.create(ToolShedRepository_table)
        assert c is ToolShedRepository_table.c.metadata
    except Exception as e:
        print("Adding metadata column to the tool_shed_repository table failed: %s" % str(e))
        log.debug(
            "Adding metadata column to the tool_shed_repository table failed: %s"
            % str(e))
# Example #36
# 0
def downgrade_enterprise_dbs(migrate_engine):
    """Restore plain boolean ``deleted`` columns on every table."""
    meta = MetaData()
    meta.bind = migrate_engine

    for name in all_tables:
        tbl = Table(name, meta, autoload=True)

        restored = Column('old_deleted', Boolean, default=False)
        restored.create(tbl, populate_default=False)

        # A row whose marker equals its own id was soft-deleted.
        tbl.update().\
                where(tbl.c.deleted == tbl.c.id).\
                values(old_deleted=True).\
                execute()

        # Swap the boolean column back in under the original name.
        tbl.c.deleted.drop()
        tbl.c.old_deleted.alter(name="deleted")
def upgrade(migrate_engine):
    """Add remote_repository_url and homepage_url columns to repository."""
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    repository = Table("repository", metadata, autoload=True)
    remote_col = Column("remote_repository_url", TrimmedString(255))
    homepage_col = Column("homepage_url", TrimmedString(255))
    try:
        # Create both columns, then verify they attached to the table.
        for new_col in (remote_col, homepage_col):
            new_col.create(repository)
        assert remote_col is repository.c.remote_repository_url
        assert homepage_col is repository.c.homepage_url
    except Exception:
        log.exception(
            "Adding remote_repository_url and homepage_url columns to the repository table failed."
        )
# Example #38
# 0
def serial_updater(column, table, columns):
    """
    Updater for a serial column.
    :param column: Serial column.
    :type column: SerialColumn
    :param table: Table that the column belongs to.
    :param columns: Existing column names in the database for the given table.
    :type columns: list
    :returns: SQLAlchemy column object.
    :rtype: Column
    """
    # Fix: the docstring documented a nonexistent ``serial_column`` parameter
    # and omitted ``table``; the code itself is unchanged.
    col = Column(column.name, Integer, primary_key=True)

    # Nothing to do if the column already exists in the database.
    if col.name in columns:
        return col

    if column.action == DbItem.CREATE:
        col.create(table=table)

    return col
def upgrade(migrate_engine):
    """Add size/usage tracking columns across several Galaxy tables."""
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()

    # dataset.total_size
    try:
        dataset = Table("dataset", metadata, autoload=True)
        c = Column('total_size', Numeric(15, 0))
        c.create(dataset)
        assert c is dataset.c.total_size
    except Exception as e:
        message = "Adding total_size column to dataset table failed: %s" % str(e)
        print(message)
        log.debug(message)

    # history_dataset_association.purged (indexed, backfilled to False)
    try:
        hda = Table("history_dataset_association", metadata, autoload=True)
        c = Column("purged", Boolean, index=True, default=False)
        c.create(hda, index_name="ix_history_dataset_association_purged")
        assert c is hda.c.purged
        migrate_engine.execute(hda.update().values(purged=False))
    except Exception as e:
        message = ("Adding purged column to history_dataset_association table failed: %s"
                   % str(e))
        print(message)
        log.debug(message)

    # galaxy_user.disk_usage (indexed)
    try:
        user = Table("galaxy_user", metadata, autoload=True)
        c = Column('disk_usage', Numeric(15, 0), index=True)
        c.create(user, index_name="ix_galaxy_user_disk_usage")
        assert c is user.c.disk_usage
    except Exception as e:
        message = "Adding disk_usage column to galaxy_user table failed: %s" % str(e)
        print(message)
        log.debug(message)

    # galaxy_session.disk_usage (indexed)
    try:
        session = Table("galaxy_session", metadata, autoload=True)
        c = Column('disk_usage', Numeric(15, 0), index=True)
        c.create(session, index_name="ix_galaxy_session_disk_usage")
        assert c is session.c.disk_usage
    except Exception as e:
        message = "Adding disk_usage column to galaxy_session table failed: %s" % str(e)
        print(message)
        log.debug(message)
def upgrade(migrate_engine):
    """Add a nullable JSON ``email_alerts`` column to the repository table."""
    # Python 3 compatible print/except syntax (was ``print __doc__`` /
    # ``except Exception, e``), matching the Python 3 blocks in this file.
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    # Add the email_alerts column to the repository table.
    # (The original comment wrongly referenced the job table.)
    Repository_table = Table("repository", metadata, autoload=True)
    c = Column("email_alerts", JSONType, nullable=True)
    try:
        # Create
        c.create(Repository_table)
        assert c is Repository_table.c.email_alerts
    except Exception as e:
        print("Adding email_alerts column to the repository table failed: %s" % str(e))
        log.debug(
            "Adding email_alerts column to the repository table failed: %s" %
            str(e))
def upgrade(migrate_engine):
    """Add a ``long_description`` text column to the repository table."""
    # Python 3 compatible print/except syntax (was ``print __doc__`` /
    # ``except Exception, e``), matching the Python 3 blocks in this file.
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    # Add the long_description column to the repository table.
    # (The original comment wrongly referenced the job table.)
    Repository_table = Table("repository", metadata, autoload=True)
    c = Column("long_description", TEXT)
    try:
        # Create
        c.create(Repository_table)
        assert c is Repository_table.c.long_description
    except Exception as e:
        print("Adding long_description column to the repository table failed: %s" % str(e))
        log.debug(
            "Adding long_description column to the repository table failed: %s"
            % str(e))
def upgrade(migrate_engine):
    """Add an indexed ``imported`` flag to the job table and backfill it."""
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()

    # Create and initialize imported column in job table.
    job_table = Table("job", metadata, autoload=True)
    imported_col = Column("imported", Boolean, default=False, index=True)
    try:
        imported_col.create(job_table, index_name="ix_job_imported")
        assert imported_col is job_table.c.imported

        # Backfill existing rows with the dialect-appropriate FALSE literal.
        migrate_engine.execute("UPDATE job SET imported=%s" %
                               engine_false(migrate_engine))
    except Exception:
        log.exception("Adding imported column to job table failed.")
def downgrade(migrate_engine):
    """Replace ``consoles.instance_uuid`` with an ``instance_id`` foreign key."""
    meta = MetaData()
    meta.bind = migrate_engine
    consoles = Table('consoles', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    instance_id = Column('instance_id', Integer, ForeignKey('instances.id'))
    instance_id.create(consoles)

    try:
        # Resolve each console's uuid back to the numeric instance id.
        lookup = select([instances.c.id],
                        instances.c.uuid == consoles.c.instance_uuid)
        consoles.update().values(instance_id=lookup).execute()
    except Exception:
        # Undo the half-applied change before propagating the error.
        instance_id.drop()
        raise

    consoles.c.instance_uuid.drop()
# Example #44
# 0
def downgrade(migrate_engine):
    """Swap security_group_instance_association's uuid link for an id FK."""
    meta = MetaData()
    meta.bind = migrate_engine
    sgia = Table('security_group_instance_association', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    instance_id = Column('instance_id', Integer, ForeignKey('instances.id'))
    instance_id.create(sgia)

    try:
        # Map each association's uuid back to the numeric instance id.
        lookup = select([instances.c.id],
                        instances.c.uuid == sgia.c.instance_uuid)
        sgia.update().values(instance_id=lookup).execute()
    except Exception:
        # Undo the partial change before re-raising.
        instance_id.drop()
        raise

    sgia.c.instance_uuid.drop()
def downgrade(migrate_engine):
    """Swap block_device_mapping's uuid link for an ``instance_id`` FK."""
    meta = MetaData()
    meta.bind = migrate_engine
    block_device_mapping = Table('block_device_mapping', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    instance_id = Column('instance_id', Integer, ForeignKey('instances.id'))
    instance_id.create(block_device_mapping)

    try:
        # Map each mapping's uuid back to the numeric instance id.
        lookup = select([instances.c.id],
                        instances.c.uuid == block_device_mapping.c.instance_uuid)
        block_device_mapping.update().values(instance_id=lookup).execute()
    except Exception:
        # Undo the partial change before re-raising.
        instance_id.drop()
        raise

    block_device_mapping.c.instance_uuid.drop()
# Example #46
# 0
def upgrade(migrate_engine):
    """Add an ``installed_changeset_revision`` column to tool_shed_repository."""
    metadata.bind = migrate_engine
    # Python 3 compatible print/except syntax (was ``print __doc__`` /
    # ``except Exception, e``), matching the Python 3 blocks in this file.
    print(__doc__)
    metadata.reflect()
    ToolShedRepository_table = Table("tool_shed_repository",
                                     metadata,
                                     autoload=True)
    col = Column("installed_changeset_revision", TrimmedString(255))
    try:
        col.create(ToolShedRepository_table)
        assert col is ToolShedRepository_table.c.installed_changeset_revision
    except Exception as e:
        print("Adding installed_changeset_revision column to the tool_shed_repository table failed: %s" % str(e))
        log.debug(
            "Adding installed_changeset_revision column to the tool_shed_repository table failed: %s"
            % str(e))
# Example #47
# 0
def _update_col(column, table, data_type, columns):
    """
    Update the column based on the database operation.
    :param column: Base column.
    :type column: BaseColumn
    :param table: Table that owns (or will own) the column.
    :param data_type: SQLAlchemy data type for the column.
    :param columns: Existing column names in the database for the given table.
    :type columns: list
    :returns: SQLAlchemy column object.
    :rtype: Column
    """
    alchemy_column = Column(column.name, data_type, **_base_col_attrs(column))

    idx_name = None
    if column.index:
        idx_name = u'idx_{0}_{1}'.format(column.entity.name, column.name)

    unique_name = None
    if column.unique:
        unique_name = u'unq_{0}_{1}'.format(column.entity.name, column.name)

    if column.action == DbItem.CREATE:
        # Ensure the column does not exist otherwise an exception will be thrown
        # (idiom fix: ``x not in y`` rather than ``not x in y``).
        if column.name not in columns:
            alchemy_column.create(table=table,
                                  index_name=idx_name,
                                  unique_name=unique_name)

    elif column.action == DbItem.ALTER:
        # Ensure the column exists before altering
        if column.name in columns:
            col_attrs = _base_col_attrs(column)
            col_attrs['table'] = table
            alchemy_column.alter(**col_attrs)

    elif column.action == DbItem.DROP:
        # Ensure the column exists before dropping
        if column.name in columns:
            _clear_ref_in_entity_relations(column)
            alchemy_column.drop(table=table)

    # Ensure column is added to the table
    if alchemy_column.table is None:
        alchemy_column._set_parent(table)

    return alchemy_column
# Example #48
# 0
def upgrade(migrate_engine):
    """Add ``admin_id`` and ``decision`` columns to the suggestion table."""
    meta.bind = migrate_engine

    # Declare the existing table so the new columns can attach to it.
    suggestion = Table(
        'suggestion', meta,
        Column('id', Integer, primary_key=True),
        Column('user_id', Integer, ForeignKey('user.id')),
        Column('person_id', Integer, ForeignKey('person.id')),
        Column('name', String(30)),
        Column('value', Text()),
        Column('date', DateTime(timezone=True)))

    # Track which admin handled the suggestion, and their verdict.
    Column('admin_id', Integer, ForeignKey('user.id')).create(suggestion)
    Column('decision', String(10)).create(suggestion)
def upgrade(migrate_engine):
    """Add an indexed ``numeric_revision`` column to repository_metadata."""
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()
    repo_metadata = Table("repository_metadata", metadata, autoload=True)
    numeric_revision = Column("numeric_revision", Integer, index=True)
    try:
        numeric_revision.create(repo_metadata,
                                index_name="ix_numeric_revision")
        assert numeric_revision is repo_metadata.c.numeric_revision
    except Exception:
        log.exception(
            "Adding numeric_revision column to the repository table failed.")
    # Update the numeric_revision column to have the default undefined value.
    migrate_engine.execute(
        "UPDATE repository_metadata SET numeric_revision = -1")
def upgrade(migrate_engine):
    """Add a ``provider`` column to galaxy_user_openid; purge a bad identifier."""
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()

    try:
        openid_table = Table("galaxy_user_openid", metadata, autoload=True)
        provider_col = Column("provider", TrimmedString(255))
        provider_col.create(openid_table)
        assert provider_col is openid_table.c.provider
    except Exception:
        log.exception("Adding provider column to galaxy_user_openid table failed.")

    try:
        # Remove the known-bad OpenID identifier row.
        migrate_engine.execute(
            "DELETE FROM galaxy_user_openid WHERE openid='%s'" % (BAD_IDENTIFIER))
    except Exception:
        log.exception("Deleting bad Identifiers from galaxy_user_openid failed.")
# Example #51
# 0
def downgrade(migrate_engine):
    """Swap instance_actions' uuid link for an ``instance_id`` foreign key."""
    meta.bind = migrate_engine
    instance_actions = _get_table('instance_actions')
    instances = _get_table('instances')

    instance_id = Column('instance_id', Integer, ForeignKey('instances.id'))
    instance_id.create(instance_actions)

    try:
        # Resolve each action's uuid back to the numeric instance id.
        lookup = select(
            [instances.c.id],
            instances.c.uuid == instance_actions.c.instance_uuid)
        instance_actions.update().values(instance_id=lookup).execute()
    except Exception:
        # Undo the partial change before re-raising.
        instance_id.drop()
        raise

    instance_actions.c.instance_uuid.drop()
def upgrade(migrate_engine):
    """Add a ``slug`` column to the history table (manual index on MySQL)."""
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()

    history_table = Table("history", metadata, autoload=True)

    # Mysql needs manual index creation because of max length index.
    if migrate_engine.name == 'mysql':
        c = Column("slug", TEXT)
        c.create(history_table, index_name='')
        Index("ix_history_slug", history_table.c.slug,
              mysql_length=200).create()
    else:
        c = Column("slug", TEXT, index=True)
        c.create(history_table, index_name='ix_history_slug')
    assert c is history_table.c.slug
def upgrade(migrate_engine):
    """Add hierarchy columns (select_child_description, parent_id) to badge."""
    # use sqlalchemy-migrate database connection
    metadata.bind = migrate_engine
    # autoload needed tables
    # BUG FIX: the original autoloaded the 'instance' table (never used) and
    # then attached the new columns to an undefined ``badge_table`` name,
    # raising NameError at runtime. The self-referencing FK to 'badge.id'
    # shows the columns target the badge table, so autoload that instead.
    badge_table = Table('badge', metadata, autoload=True)
    # add hierachical columns to the table
    select_child_desc = Column('select_child_description',
                               Unicode(255),
                               default=u'',
                               nullable=True)
    parent = Column('parent_id',
                    Integer,
                    ForeignKey('badge.id', ondelete="CASCADE"),
                    nullable=True)
    # Create nullable first so existing rows can be backfilled via the
    # default, then tighten to NOT NULL.
    select_child_desc.create(badge_table)
    select_child_desc.alter(nullable=False)
    parent.create(badge_table)
def upgrade(migrate_engine):
    """Link participant_import_definition to redcap_project and study."""
    meta.bind = migrate_engine

    redcap_project = Table("redcap_project", meta, autoload=True)
    study = Table("study", meta, autoload=True)

    definition = Table("participant_import_definition", meta, autoload=True)

    # Indexed FK columns: create the columns first, then the constraints.
    redcap_project_id = Column("redcap_project_id", Integer, index=True)
    redcap_project_id.create(
        definition,
        index_name='idx__participant_import_definition__redcap_project_id')

    study_id = Column("study_id", Integer, index=True)
    study_id.create(
        definition, index_name='idx__participant_import_definition__study_id')

    definition.append_constraint(
        ForeignKeyConstraint([redcap_project_id], [redcap_project.c.id]))
    definition.append_constraint(ForeignKeyConstraint([study_id], [study.c.id]))
# Example #55
# 0
def upgrade(migrate_engine):
    """Give domains and records a ``status`` column for async backends."""
    meta.bind = migrate_engine

    domains_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Add a domain & record creation status for async backends.
    # Both columns are identical apart from the enum's type name.
    for target_table, enum_name in ((domains_table, 'domain_statuses'),
                                    (records_table, 'record_statuses')):
        status_col = Column('status',
                            Enum(name=enum_name, *RESOURCE_STATUSES),
                            nullable=False,
                            server_default='ACTIVE',
                            default='ACTIVE')
        status_col.create(target_table, populate_default=True)
def upgrade(migrate_engine):
    """Point redcap_project at participant_import_definition; drop strategies."""
    meta.bind = migrate_engine

    definition = Table("participant_import_definition", meta, autoload=True)

    project = Table("redcap_project", meta, autoload=True)
    # The old strategy link is superseded by the new definition link.
    project.c.participant_import_strategy_id.drop()

    definition_id = Column("participant_import_definition_id", Integer,
                           index=True)
    definition_id.create(project, index_name='idx__redcap_project__pid_id')

    project.append_constraint(
        ForeignKeyConstraint([definition_id], [definition.c.id]))

    # The strategy table itself is no longer needed.
    Table("participant_import_strategy", meta, autoload=True).drop()
def upgrade(migrate_engine):
    """Add ``job.user_id`` (FK to galaxy_user) and backfill it from sessions."""
    metadata.bind = migrate_engine
    print(__doc__)
    metadata.reflect()
    try:
        Job_table = Table("job", metadata, autoload=True)
    except NoSuchTableError:
        Job_table = None
        log.debug("Failed loading table job")
    if Job_table is not None:

        if migrate_engine.name != 'sqlite':
            try:
                col = Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True)
                col.create(Job_table, index_name='ix_job_user_id')
                assert col is Job_table.c.user_id
            except Exception:
                log.exception("Adding column 'user_id' to job table failed.")
        else:
            # SQLite: add the column without the FK constraint.
            try:
                col = Column("user_id", Integer, nullable=True)
                col.create(Job_table)
                assert col is Job_table.c.user_id
            except Exception:
                log.exception("Adding column 'user_id' to job table failed.")
        try:
            cmd = "SELECT job.id AS galaxy_job_id, " \
                + "galaxy_session.user_id AS galaxy_user_id " \
                + "FROM job " \
                + "JOIN galaxy_session ON job.session_id = galaxy_session.id;"
            job_users = migrate_engine.execute(cmd).fetchall()
            print("Updating user_id column in job table for ", len(job_users), " rows...")
            print("")
            update_count = 0
            for row in job_users:
                if row.galaxy_user_id:
                    cmd = "UPDATE job SET user_id = %d WHERE id = %d" % (int(row.galaxy_user_id), int(row.galaxy_job_id))
                    update_count += 1
                    # BUG FIX: execute only when an UPDATE was built for this
                    # row. Previously the execute sat outside this ``if`` and
                    # ran for every row, re-executing the previous statement
                    # (or the SELECT) whenever galaxy_user_id was NULL.
                    migrate_engine.execute(cmd)
            print("Updated the user_id column for ", update_count, " rows in the job table.  ")
            print(len(job_users) - update_count, " rows have no user_id since the value was NULL in the galaxy_session table.")
            print("")
        except Exception:
            log.exception("Updating job.user_id column failed.")
def upgrade(migrate_engine):
    """Add ``dbkey`` columns to the visualization tables and backfill them.

    Creates ``dbkey`` on both ``visualization`` and
    ``visualization_revision`` (indexes built manually for MySQL's index
    length limit), then copies each revision's dbkey out of its JSON
    ``config`` blob into both new columns.
    """
    metadata.bind = migrate_engine

    # Python 3 compatible (was ``print __doc__``), matching the Python 3
    # blocks elsewhere in this file.
    print(__doc__)
    metadata.reflect()

    Visualization_table = Table("visualization", metadata, autoload=True)
    Visualization_revision_table = Table("visualization_revision",
                                         metadata,
                                         autoload=True)

    # Create dbkey columns.
    x = Column("dbkey", TEXT)
    y = Column("dbkey", TEXT)
    x.create(Visualization_table)
    y.create(Visualization_revision_table)
    # Manually create indexes for compatability w/ mysql_length.
    xi = Index("ix_visualization_dbkey",
               Visualization_table.c.dbkey,
               mysql_length=200)
    xi.create()
    yi = Index("ix_visualization_revision_dbkey",
               Visualization_revision_table.c.dbkey,
               mysql_length=200)
    yi.create()
    assert x is Visualization_table.c.dbkey
    assert y is Visualization_revision_table.c.dbkey

    all_viz = migrate_engine.execute(
        "SELECT visualization.id as viz_id, visualization_revision.id as viz_rev_id, visualization_revision.config FROM visualization_revision \
                    LEFT JOIN visualization ON visualization.id=visualization_revision.visualization_id"
    )
    for viz in all_viz:
        viz_id = viz['viz_id']
        viz_rev_id = viz['viz_rev_id']
        if viz[Visualization_revision_table.c.config]:
            # NOTE(review): backslash-escaping a single quote is MySQL-specific;
            # standard SQL doubles the quote — confirm the supported dialects.
            dbkey = loads(viz[Visualization_revision_table.c.config]).get(
                'dbkey', "").replace("'", "\\'")
            migrate_engine.execute(
                "UPDATE visualization_revision SET dbkey='%s' WHERE id=%s" %
                (dbkey, viz_rev_id))
            migrate_engine.execute(
                "UPDATE visualization SET dbkey='%s' WHERE id=%s" %
                (dbkey, viz_id))
def upgrade(migrate_engine):
    """Add an ``is_authenticated`` flag to instance, backfilled to False."""
    meta.bind = migrate_engine

    # Declare the existing table so the new column can attach to it.
    instance_table = Table(
        'instance',
        meta,
        Column('id', Integer, primary_key=True),
        Column('key', Unicode(20), nullable=False, unique=True),
        Column('label', Unicode(255), nullable=False),
        Column('description', UnicodeText(), nullable=True),
        Column('required_majority', Float, nullable=False),
        Column('activation_delay', Integer, nullable=False),
        Column('create_time', DateTime, default=func.now()),
        Column('access_time',
               DateTime,
               default=func.now(),
               onupdate=func.now()),
        Column('delete_time', DateTime, nullable=True),
        Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
        Column('default_group_id',
               Integer,
               ForeignKey('group.id'),
               nullable=True),
        Column('allow_adopt', Boolean, default=True),
        Column('allow_delegate', Boolean, default=True),
        Column('allow_propose', Boolean, default=True),
        Column('allow_index', Boolean, default=True),
        Column('hidden', Boolean, default=False),
        Column('locale', Unicode(7), nullable=True),
        Column('css', UnicodeText(), nullable=True),
        Column('frozen', Boolean, default=False),
        Column('milestones', Boolean, default=False),
        Column('use_norms', Boolean, nullable=True, default=True),
        Column('require_selection', Boolean, nullable=True, default=False),
    )

    # New flag; nullable so the ALTER succeeds on populated tables.
    is_authenticated = Column('is_authenticated',
                              Boolean,
                              nullable=True,
                              default=False)
    is_authenticated.create(instance_table)

    # Backfill existing rows with an explicit False rather than NULL.
    migrate_engine.execute(
        instance_table.update(values={'is_authenticated': False}))
def upgrade(migrate_engine):
    """Add site, portfolio_number, status and type columns to edge_study."""
    meta = MetaData(bind=migrate_engine)
    study = Table('edge_study', meta, autoload=True)
    # Each new column is a plain NVARCHAR of the given width.
    for col_name, width in (('site', 500),
                            ('portfolio_number', 500),
                            ('status', 50),
                            ('type', 50)):
        Column(col_name, NVARCHAR(width)).create(study)