Example #1
    def test_implicit_execution(self):
        metadata = MetaData()
        table = Table(
            "test_table",
            metadata,
            Column("foo", Integer),
            test_needs_acid=True,
        )
        conn = testing.db.connect()
        metadata.create_all(bind=conn)
        try:
            trans = conn.begin()
            metadata.bind = conn
            t = table.insert()
            assert t.bind is conn
            table.insert().execute(foo=5)
            table.insert().execute(foo=6)
            table.insert().execute(foo=7)
            trans.rollback()
            metadata.bind = None
            assert (
                conn.execute("select count(*) from test_table").scalar() == 0
            )
        finally:
            metadata.drop_all(bind=conn)
    def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self):

        table_name = "__test_tmp_table__"
        uc_name = 'uniq_foo'
        values = [
            {'id': 1, 'a': 3, 'foo': 10},
            {'id': 2, 'a': 2, 'foo': 20},
            {'id': 3, 'a': 1, 'foo': 30}
        ]

        for key, engine in self.engines.items():
            meta = MetaData()
            meta.bind = engine
            test_table = Table(table_name, meta,
                               Column('id', Integer, primary_key=True,
                                      nullable=False),
                               Column('a', Integer),
                               Column('foo', CustomType, default=0),
                               UniqueConstraint('a', name='uniq_a'),
                               UniqueConstraint('foo', name=uc_name))
            test_table.create()

            engine.execute(test_table.insert(), values)
            if key == "sqlite":
                warnings.simplefilter("ignore", SAWarning)
                # NOTE(boris-42): Missing info about column `foo` that has
                #                 unsupported type CustomType.
                self.assertRaises(exception.NovaException,
                                  utils.drop_unique_constraint,
                                  engine, table_name, uc_name, 'foo')

                # NOTE(boris-42): Wrong type of foo instance. It should be
                #                 an instance of sqlalchemy.Column.
                self.assertRaises(exception.NovaException,
                                  utils.drop_unique_constraint,
                                  engine, table_name, uc_name, 'foo',
                                  foo=Integer())

            foo = Column('foo', CustomType, default=0)
            utils.drop_unique_constraint(engine, table_name, uc_name, 'foo',
                                         foo=foo)

            s = test_table.select().order_by(test_table.c.id)
            rows = engine.execute(s).fetchall()

            for i in xrange(0, len(values)):
                v = values[i]
                self.assertEqual((v['id'], v['a'], v['foo']), rows[i])

            # NOTE(boris-42): Update data about Table from DB.
            meta = MetaData()
            meta.bind = engine
            test_table = Table(table_name, meta, autoload=True)
            constraints = filter(lambda c: c.name == uc_name,
                                 test_table.constraints)
            self.assertEqual(len(constraints), 0)
            self.assertEqual(len(test_table.constraints), 1)
            test_table.drop()
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    dns_domains_old = Table('dns_domains', meta, autoload=True)
    dns_domains_old.rename(name='dns_domains_old')

    # NOTE(dprince): manually remove pkey/fkey for postgres
    if migrate_engine.name == "postgresql":
        sql = """ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
              dns_domains_pkey;
              ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
              dns_domains_project_id_fkey;"""
        migrate_engine.execute(sql)

    # Bind new metadata to avoid issues after the rename
    meta = MetaData()
    meta.bind = migrate_engine

    dns_domains_new = Table('dns_domains', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('domain', String(length=512), primary_key=True, nullable=False),
        Column('scope', String(length=255)),
        Column('availability_zone', String(length=255)),
        Column('project_id', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='latin1',
    )
    dns_domains_new.create()

    dns_domains_old = Table('dns_domains_old', meta, autoload=True)
    record_list = list(dns_domains_old.select().execute())
    for rec in record_list:
        row = dns_domains_new.insert()
        row.execute({'created_at': rec['created_at'],
                    'updated_at': rec['updated_at'],
                    'deleted_at': rec['deleted_at'],
                    'deleted': rec['deleted'],
                    'domain': rec['domain'],
                    'scope': rec['scope'],
                    'availability_zone': rec['availability_zone'],
                    'project_id': rec['project_id'],
                    })

    dns_domains_old.drop()

    # NOTE(dprince): We can't easily add the MySQL Fkey on the downgrade
    # because projects is 'utf8' where dns_domains is 'latin1'.
    if migrate_engine.name != "mysql":
        projects = Table('projects', meta, autoload=True)
        fkey = ForeignKeyConstraint(columns=[dns_domains_new.c.project_id],
                                    refcolumns=[projects.c.id])
        fkey.create()
Example #4
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    token = Table('token', meta, autoload=True)
    old_id_col = token.c.id
    old_id_col.alter(name='id_hash')
    # Note: We obtain a new metadata reference to avoid
    # sqlalchemy.exc.ArgumentError:
    # Trying to redefine primary-key column 'id' as a non-primary-key...
    meta = MetaData()
    meta.bind = migrate_engine
    token = Table('token', meta, autoload=True)
    new_id = Column("id", String(2048))
    token.create_column(new_id)
Example #5
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    tables = define_tables(meta)
    tables.reverse()
    for table in tables:
        table.drop()
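# define_tables() is defined elsewhere in this migration module and is not
# shown here. A hypothetical minimal version (table and column names are
# illustrative assumptions, reusing the sqlalchemy names these migration
# modules already import) returns the tables in creation order, which is why
# the downgrade above reverses the list: tables holding foreign keys must be
# dropped before the tables they reference.
def define_tables(meta):
    parents = Table('parents', meta,
                    Column('id', Integer, primary_key=True, nullable=False))
    children = Table('children', meta,
                     Column('id', Integer, primary_key=True, nullable=False),
                     Column('parent_id', Integer, ForeignKey('parents.id')))
    return [parents, children]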
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine
    #
    # New Tables
    #
    instance_faults = Table('instance_faults', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted', Boolean(create_constraint=True, name=None),
                    default=False),
            Column('id', Integer(), primary_key=True, nullable=False),
            Column('instance_uuid', String(36), ForeignKey('instances.uuid')),
            Column('code', Integer(), nullable=False),
            Column('message',
                   String(length=255, convert_unicode=False,
                          assert_unicode=None,
                          unicode_error=None, _warn_on_bytestring=False)),
            Column('details',
                   Text(length=None, convert_unicode=False,
                        assert_unicode=None,
                        unicode_error=None, _warn_on_bytestring=False)),
            )
    try:
        instance_faults.create()
    except Exception:
        LOG.info(repr(instance_faults))
def upgrade(migrate_engine):
    """Add default quota class data into DB."""
    meta = MetaData()
    meta.bind = migrate_engine

    quota_classes = Table('quota_classes', meta, autoload=True)

    rows = quota_classes.count().\
        where(quota_classes.c.class_name == 'default').execute().scalar()

    # Do not add entries if there are already 'default' entries.  We don't
    # want to write over something the user added.
    if rows:
        return

    # Set default volumes
    qci = quota_classes.insert()
    qci.execute({'created_at': CREATED_AT,
                 'class_name': CLASS_NAME,
                 'resource': 'volumes',
                 'hard_limit': CONF.quota_volumes,
                 'deleted': False, })
    # Set default snapshots
    qci.execute({'created_at': CREATED_AT,
                 'class_name': CLASS_NAME,
                 'resource': 'snapshots',
                 'hard_limit': CONF.quota_snapshots,
                 'deleted': False, })
    # Set default gigabytes
    qci.execute({'created_at': CREATED_AT,
                 'class_name': CLASS_NAME,
                 'resource': 'gigabytes',
                 'hard_limit': CONF.quota_gigabytes,
                 'deleted': False, })
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    columns = [
        (('created_at', DateTime), {}),
        (('updated_at', DateTime), {}),
        (('deleted_at', DateTime), {}),
        (('deleted', Integer), {}),
        (('id', Integer), dict(primary_key=True, nullable=False)),
        (('instance_uuid', String(length=36)), dict(nullable=False)),
        (('source_host', String(length=255)), dict(nullable=True)),
        (('dest_host', String(length=255)), dict(nullable=True)),
        (('dest_addr', String(length=255)), dict(nullable=True)),
        (('block_migration', Boolean), dict(nullable=True, default=False)),
        (('migrate_data', Text), dict(nullable=True)),
        ]

    for prefix in ('', 'shadow_'):
        basename = prefix + 'huawei_live_migrations'
        if migrate_engine.has_table(basename):
            continue
        _columns = tuple([Column(*args, **kwargs)
                          for args, kwargs in columns])
        table = Table(basename, meta, *_columns, mysql_engine='InnoDB',
                      mysql_charset='utf8')
        table.create()
def downgrade(migrate_engine):
    """Convert columns back to the larger String(255)."""
    meta = MetaData()
    meta.bind = migrate_engine
    for table, column in CIDR_TABLE_COLUMNS:
        t = Table(table, meta, autoload=True)
        getattr(t.c, column).alter(type=String(39))
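# CIDR_TABLE_COLUMNS is a module-level constant defined outside this snippet.
# A plausible shape (an assumption, not the project's actual list) is a
# sequence of (table_name, column_name) pairs naming each CIDR column:
CIDR_TABLE_COLUMNS = [
    ('networks', 'cidr'),
    ('security_group_rules', 'cidr'),
]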
Example #10
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    shares = Table('shares', meta, autoload=True)
    access_map = Table('share_access_map', meta, autoload=True)
    access_map.drop()
    shares.drop()
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    storage_pools = Table('storage_pools', meta, autoload=True)

    storage_pools.drop_column('cache_mode')
Example #12
def db_searchreplace(db_name, db_user, db_password, db_host, search, replace):
    engine = create_engine("mysql://%s:%s@%s/%s" %
                           (db_user, db_password, db_host, db_name))
    #inspector = reflection.Inspector.from_engine(engine)
    #print inspector.get_table_names()
    meta = MetaData()
    meta.bind = engine
    meta.reflect()

    Session = sessionmaker(engine)


    Base = declarative_base(metadata=meta)
    session = Session()

    tableClassDict = {}
    for table_name, table_obj in dict.iteritems(Base.metadata.tables):
        try:
            tableClassDict[table_name] = type(str(table_name), (Base,), {'__tablename__': table_name, '__table_args__':{'autoload' : True, 'extend_existing': True} })
    #        class tempClass(Base):
    #            __tablename__ = table_name
    #            __table_args__ = {'autoload' : True, 'extend_existing': True}
    #            foo_id = Column(Integer, primary_key='temp')
            for row in session.query(tableClassDict[table_name]).all():
                for column in table_obj._columns.keys():
                    data_to_fix = getattr(row, column)
                    fixed_data = recursive_unserialize_replace( search, replace, data_to_fix, False)

                    setattr(row, column, fixed_data)
                    #print fixed_data
        except Exception as e:
            print(e)
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    services = Table('services', meta, autoload=True)
    services.drop_column('disabled_reason')
    shadow_services = Table('shadow_services', meta, autoload=True)
    shadow_services.drop_column('disabled_reason')
Example #14
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """
    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" constuction. In
    sqlite is only one way to drop UC:
        1) Create new table with same columns, indexes and constraints
           (except one that we want to drop).
        2) Copy data from old table to new.
        3) Drop old table.
        4) Rename new table to the name of old table.

    :param migrate_engine: sqlalchemy engine
    :param table_name:     name of table that contains uniq constarint.
    :param uc_name:        name of uniq constraint that will be dropped.
    :param columns:        columns that are in uniq constarint.
    :param col_name_col_instance:   contains pair column_name=column_instance.
                            column_instance is instance of Column. These params
                            are required only for columns that have unsupported
                            types by sqlite. For example BigInteger.
    """
    if migrate_engine.name in ["mysql", "postgresql"]:
        meta = MetaData()
        meta.bind = migrate_engine
        t = Table(table_name, meta, autoload=True)
        uc = UniqueConstraint(*columns, table=t, name=uc_name)
        uc.drop()
    else:
        _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                          **col_name_col_instance)
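# _drop_unique_constraint_in_sqlite is referenced above but not shown. Below
# is a minimal sketch of the four-step recreate dance from the docstring; the
# body is an illustrative assumption rather than the project's actual
# implementation, it reuses the sqlalchemy names imported by the surrounding
# examples (MetaData, Table, NullType), and it relies on sqlalchemy-migrate's
# Table.rename() just like the dns_domains examples do.
def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                      **col_name_col_instance):
    meta = MetaData()
    meta.bind = migrate_engine
    old_table = Table(table_name, meta, autoload=True)

    columns = []
    for column in old_table.columns:
        if isinstance(column.type, NullType):
            # sqlite reflection loses unsupported types, so take the
            # caller-supplied Column instance instead.
            columns.append(col_name_col_instance[column.name].copy())
        else:
            columns.append(column.copy())

    # 1) Create a new table with the same columns, minus the constraint.
    new_table = Table(table_name + '__tmp__', meta, *columns)
    new_table.create()

    # 2) Copy data from the old table to the new one.
    migrate_engine.execute(new_table.insert().from_select(
        [c.name for c in columns], old_table.select()))

    # 3) Drop the old table and 4) rename the new table into its place.
    old_table.drop()
    new_table.rename(table_name)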
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine;
    # bind migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    # load tables for fk
    instances = Table('instances', meta, autoload=True)

    #
    # New Tables
    #
    migrations = Table('migrations', meta,
                Column('created_at', DateTime(timezone=False)),
                Column('updated_at', DateTime(timezone=False)),
                Column('deleted_at', DateTime(timezone=False)),
                Column('deleted', Boolean(create_constraint=True, name=None)),
                Column('id', Integer(), primary_key=True, nullable=False),
                Column('source_compute', String(255)),
                Column('dest_compute', String(255)),
                Column('dest_host', String(255)),
                Column('instance_id', Integer, ForeignKey('instances.id'),
                    nullable=True),
                Column('status', String(255)),
          )

    for table in (migrations, ):
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            LOG.exception('Exception while creating table')
            raise
def downgrade(migrate_engine):
    """Convert volume and snapshot id columns back to int."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    snapshots = Table('snapshots', meta, autoload=True)
    iscsi_targets = Table('iscsi_targets', meta, autoload=True)
    volume_metadata = Table('volume_metadata', meta, autoload=True)
    block_device_mapping = Table('block_device_mapping', meta, autoload=True)
    sm_volumes = Table('sm_volume', meta, autoload=True)

    volume_mappings = Table('volume_id_mappings', meta, autoload=True)
    snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True)

    volume_list = list(volumes.select().execute())
    for v in volume_list:
        new_id = select([volume_mappings.c.id],
            volume_mappings.c.uuid == v['id'])

        volumes.update().\
            where(volumes.c.id == v['id']).\
            values(id=new_id).execute()

        sm_volumes.update().\
            where(sm_volumes.c.id == v['id']).\
            values(id=new_id).execute()

        snapshots.update().\
            where(snapshots.c.volume_id == v['id']).\
            values(volume_id=new_id).execute()

        iscsi_targets.update().\
            where(iscsi_targets.c.volume_id == v['id']).\
            values(volume_id=new_id).execute()

        volume_metadata.update().\
            where(volume_metadata.c.volume_id == v['id']).\
            values(volume_id=new_id).execute()

        block_device_mapping.update().\
            where(block_device_mapping.c.volume_id == v['id']).\
            values(volume_id=new_id).execute()

    snapshot_list = list(snapshots.select().execute())
    for s in snapshot_list:
        new_id = select([snapshot_mappings.c.id],
            snapshot_mappings.c.uuid == s['id'])

        volumes.update().\
            where(volumes.c.snapshot_id == s['id']).\
            values(snapshot_id=new_id).execute()

        snapshots.update().\
            where(snapshots.c.id == s['id']).\
            values(id=new_id).execute()

        block_device_mapping.update().\
            where(block_device_mapping.c.snapshot_id == s['id']).\
            values(snapshot_id=new_id).execute()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    request_specs = Table('request_specs', meta, autoload=True)
    if request_specs.c.spec.type != api_models.MediumText():
        request_specs.c.spec.alter(type=api_models.MediumText())
Example #18
def _create_shadow_tables(migrate_engine):
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    table_names = meta.tables.keys()

    meta.bind = migrate_engine

    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)

        columns = []
        for column in table.columns:
            column_copy = None
            # NOTE(boris-42): BigInteger is not supported by sqlite, so
            #                 after copy it will have NullType, other
            #                 types that are used in Nova are supported by
            #                 sqlite.
            if isinstance(column.type, NullType):
                column_copy = Column(column.name, BigInteger(), default=0)
            else:
                column_copy = column.copy()
            columns.append(column_copy)

        shadow_table_name = 'shadow_' + table_name
        shadow_table = Table(shadow_table_name, meta, *columns,
                             mysql_engine='InnoDB')
        try:
            shadow_table.create(checkfirst=True)
        except Exception:
            LOG.info(repr(shadow_table))
            LOG.exception(_('Exception while creating table.'))
            raise
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)

    data_present = check_instance_presence(migrate_engine, instances)

    if data_present and not FLAGS.connection_type:
        msg = ("Found instance records in database. You must specify "
               "connection_type to run migration migration")
        raise exception.Error(msg)

    instance_types = Table('instance_types', meta, autoload=True)

    for table in (instances, instance_types):
        root_gb = Column('root_gb', Integer)
        root_gb.create(table)
        ephemeral_gb = Column('ephemeral_gb', Integer)
        ephemeral_gb.create(table)

    # Since this migration is part of the work to get all drivers
    # working the same way, we need to treat the new root_gb and
    # ephemeral_gb columns differently depending on what the
    # driver implementation used to behave like.
    if FLAGS.connection_type == 'libvirt':
        upgrade_libvirt(instances, instance_types)
    else:
        upgrade_other(instances, instance_types)

    default_local_device = instances.c.default_local_device
    default_local_device.alter(name='default_ephemeral_device')

    for table in (instances, instance_types):
        table.drop_column('local_gb')
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    vifs = Table('virtual_interfaces', meta, autoload=True)
    network = Table('networks', meta, autoload=True)

    bw_usage_cache = Table('bw_usage_cache', meta,
                Column('created_at', DateTime(timezone=False)),
                Column('updated_at', DateTime(timezone=False)),
                Column('deleted_at', DateTime(timezone=False)),
                Column('deleted', Boolean(create_constraint=True, name=None)),
                Column('id', Integer(), primary_key=True, nullable=False),
                Column('instance_id', Integer(), nullable=False),
                Column('mac', String(255)),
                Column('start_period', DateTime(timezone=False),
                       nullable=False),
                Column('last_refreshed', DateTime(timezone=False)),
                Column('bw_in', BigInteger()),
                Column('bw_out', BigInteger()),
                useexisting=True)

    network_label_column = Column('network_label', String(255))
    bw_usage_cache.create_column(network_label_column)

    bw_usage_cache.update()\
        .values(network_label=select([network.c.label])\
            .where(and_(
                network.c.id == vifs.c.network_id,
                vifs.c.address == bw_usage_cache.c.mac,
                bw_usage_cache.c.instance_id == vifs.c.instance_id))\
            .as_scalar()).execute()

    bw_usage_cache.c.mac.drop()
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instance_types = Table('instance_types', meta, autoload=True)

    for table in (instances, instance_types):
        local_gb = Column('local_gb', Integer)
        local_gb.create(table)

    try:
        for table in (instances, instance_types):
            if FLAGS.connection_type == 'libvirt':
                column = table.c.ephemeral_gb
            else:
                column = table.c.root_gb
            table.update().values(local_gb=column).execute()
    except Exception:
        for table in (instances, instance_types):
            table.drop_column('local_gb')
        raise

    default_ephemeral_device = instances.c.default_ephemeral_device
    default_ephemeral_device.alter(name='default_local_device')

    for table in (instances, instance_types):
        table.drop_column('root_gb')
        table.drop_column('ephemeral_gb')
Example #22
def check_shadow_table(migrate_engine, table_name):
    """This method checks that table with ``table_name`` and
    corresponding shadow table have same columns.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
                         autoload=True)

    columns = {c.name: c for c in table.columns}
    shadow_columns = {c.name: c for c in shadow_table.columns}

    for name, column in columns.iteritems():
        if name not in shadow_columns:
            raise exception.NovaException(
                _("Missing column %(table)s.%(column)s in shadow table")
                        % {'column': name, 'table': shadow_table.name})
        shadow_column = shadow_columns[name]

        if not isinstance(shadow_column.type, type(column.type)):
            raise exception.NovaException(
                _("Different types in %(table)s.%(column)s and shadow table: "
                  "%(c_type)s %(shadow_c_type)s")
                        % {'column': name, 'table': table.name,
                           'c_type': column.type,
                           'shadow_c_type': shadow_column.type})

    for name, column in shadow_columns.iteritems():
        if name not in columns:
            raise exception.NovaException(
                _("Extra column %(table)s.%(column)s in shadow table")
                        % {'column': name, 'table': shadow_table.name})
    return True
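# Hypothetical usage from a migration test (the table name here is an
# assumption): after altering 'instances' in a migration, assert that its
# shadow table was kept in sync.
assert check_shadow_table(migrate_engine, 'instances')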
Example #23
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """Drop unique constraint from table.

    This method drops UC from table and works for mysql, postgresql and sqlite.
    In mysql and postgresql we are able to use "alter table" construction.
    Sqlalchemy doesn't support some sqlite column types and replaces their
    type with NullType in metadata. We process these columns and replace
    NullType with the correct column type.

    :param migrate_engine: sqlalchemy engine
    :param table_name:     name of table that contains unique constraint.
    :param uc_name:        name of unique constraint that will be dropped.
    :param columns:        columns that are in unique constraint.
    :param col_name_col_instance:   contains pair column_name=column_instance.
                            column_instance is instance of Column. These params
                            are required only for columns that have types
                            unsupported by sqlite, for example BigInteger.
    """

    meta = MetaData()
    meta.bind = migrate_engine
    t = Table(table_name, meta, autoload=True)

    if migrate_engine.name == "sqlite":
        override_cols = [
            _get_not_supported_column(col_name_col_instance, col.name)
            for col in t.columns
            if isinstance(col.type, NullType)
        ]
        for col in override_cols:
            t.columns.replace(col)

    uc = UniqueConstraint(*columns, table=t, name=uc_name)
    uc.drop()
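# _get_not_supported_column is referenced above but not shown. A minimal
# sketch (an assumption, not the project's code, reusing the exception and
# Column names from the surrounding examples) validates and returns the
# caller-supplied Column for a reflected NullType column:
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except KeyError:
        raise exception.NovaException(
            _("Please specify column %s in col_name_col_instance param. "
              "It is required because this column has a type unsupported "
              "by sqlite.") % column_name)
    if not isinstance(column, Column):
        raise exception.NovaException(
            _("col_name_col_instance param has wrong type of column "
              "instance for column %s. It should be an instance of "
              "sqlalchemy.Column.") % column_name)
    return column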
Example #24
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)

    # New table
    transfers = Table(
        'transfers', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean),
        Column('id', String(36), primary_key=True, nullable=False),
        Column('volume_id', String(length=36), ForeignKey('volumes.id'),
               nullable=False),
        Column('display_name', String(length=255)),
        Column('salt', String(length=255)),
        Column('crypt_hash', String(length=255)),
        Column('expires_at', DateTime(timezone=False)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    try:
        transfers.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(transfers))
        raise
Example #25
def upgrade(migrate_engine):
    """Add workers table."""
    meta = MetaData()
    meta.bind = migrate_engine

    workers = Table(
        'workers', meta,
        # Inherited fields from CinderBase
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(), default=False),

        # Workers table specific fields
        Column('id', Integer, primary_key=True),
        Column('resource_type', String(40), nullable=False),
        Column('resource_id', String(36), nullable=False),
        Column('status', String(255), nullable=False),
        Column('service_id', Integer, nullable=True),
        UniqueConstraint('resource_type', 'resource_id'),

        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    workers.create()

    services = Table('services', meta, autoload=True)

    ForeignKeyConstraint(
        columns=[workers.c.service_id],
        refcolumns=[services.c.id]).create()
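# A matching downgrade is not shown in the source; a hypothetical one would
# mirror the creation order above, dropping the foreign key before the table:
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    workers = Table('workers', meta, autoload=True)
    services = Table('services', meta, autoload=True)
    ForeignKeyConstraint(columns=[workers.c.service_id],
                         refcolumns=[services.c.id]).drop()
    workers.drop()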
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    snapshots = Table('snapshots', meta, autoload=True)

    # New table
    snapshot_metadata = Table(
        'snapshot_metadata', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('snapshot_id', String(length=36), ForeignKey('snapshots.id'),
               nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        mysql_engine='InnoDB'
    )

    try:
        snapshot_metadata.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(snapshot_metadata))
        raise
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    shadow_table = Table('shadow_instances', meta, autoload=True)
    locked_by_column = shadow_table.c.locked_by
    if "SHADOW_INSTANCES0LOCKED_BY" in str(locked_by_column.type):
        LOG.info("The shadow_instances table needs to be converted.")
        shadow_table.drop()
        table = Table('instances', meta, autoload=True)
        columns = []
        for column in table.columns:
            if column.name == 'locked_by':
                enum = Enum('owner', 'admin',
                            name='instances0locked_by'.upper())
                column_copy = Column(column.name, enum)
            else:
                column_copy = column.copy()
            columns.append(column_copy)
        shadow_table_name = 'shadow_instances'
        shadow_table = Table(shadow_table_name, meta, *columns,
                             mysql_engine='InnoDB', extend_existing=True)
        shadow_table.create(checkfirst=True)
    else:
        LOG.info("the shadow instance table don't need to convert.")
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)

    virtual_interfaces.drop_column('uuid')
def upgrade(migrate_engine):
    """Add backup_metadata table."""

    meta = MetaData()
    meta.bind = migrate_engine

    Table('backups', meta, autoload=True)

    backup_metadata = Table(
        'backup_metadata', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(), default=False),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('backup_id', String(36),
               ForeignKey('backups.id'),
               nullable=False),
        Column('key', String(255)),
        Column('value', String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    backup_metadata.create()

    if not utils.index_exists_on_columns(migrate_engine,
                                         'backup_metadata',
                                         ['backup_id']):
        utils.add_index(migrate_engine,
                        'backup_metadata',
                        'backup_metadata_backup_id_idx',
                        ['backup_id'])
Example #30
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # create new table
    task_log = Table('task_log', meta,
            Column('created_at', DateTime(timezone=False)),
            Column('updated_at', DateTime(timezone=False)),
            Column('deleted_at', DateTime(timezone=False)),
            Column('deleted',
                    Boolean(create_constraint=True, name=None)),
            Column('id', Integer(),
                    primary_key=True,
                    nullable=False,
                    autoincrement=True),
            Column('task_name', String(255), nullable=False),
            Column('state', String(255), nullable=False),
            Column('host', String(255), index=True, nullable=False),
            Column('period_beginning', String(255),
                                       index=True, nullable=False),
            Column('period_ending', String(255), index=True, nullable=False),
            Column('message', String(255), nullable=False),
            Column('task_items', Integer()),
            Column('errors', Integer()),
            )
    try:
        task_log.create()
    except Exception:
        meta.drop_all(tables=[task_log])
        raise

    if migrate_engine.name == "mysql":
        migrate_engine.execute("ALTER TABLE task_log "
                "Engine=InnoDB")
Example #31
def DBInMemory_test():
    def rollback():
        with sd_lock:
            saveddata_session.rollback()

    print("Creating database in memory")
    from os.path import realpath, join, dirname, abspath

    debug = False
    gamedataCache = True
    saveddataCache = True
    gamedata_version = ""
    gamedata_connectionstring = 'sqlite:///' + realpath(
        join(dirname(abspath(unicode(__file__))), "..", "eve.db"))
    saveddata_connectionstring = 'sqlite:///:memory:'

    class ReadOnlyException(Exception):
        pass

    if callable(gamedata_connectionstring):
        gamedata_engine = create_engine("sqlite://",
                                        creator=gamedata_connectionstring,
                                        echo=debug)
    else:
        gamedata_engine = create_engine(gamedata_connectionstring, echo=debug)

    gamedata_meta = MetaData()
    gamedata_meta.bind = gamedata_engine
    gamedata_session = sessionmaker(bind=gamedata_engine,
                                    autoflush=False,
                                    expire_on_commit=False)()

    # This should be moved elsewhere, maybe as an actual query. Currently,
    # without the try-except, it breaks when making a new game db because we
    # haven't reached gamedata_meta.create_all()
    try:
        gamedata_version = gamedata_session.execute(
            "SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'client_build'"
        ).fetchone()[0]
    except Exception as e:
        print("Missing gamedata version.")
        gamedata_version = None

    if saveddata_connectionstring is not None:
        if callable(saveddata_connectionstring):
            saveddata_engine = create_engine(
                creator=saveddata_connectionstring, echo=debug)
        else:
            saveddata_engine = create_engine(saveddata_connectionstring,
                                             echo=debug)

        saveddata_meta = MetaData()
        saveddata_meta.bind = saveddata_engine
        saveddata_session = sessionmaker(bind=saveddata_engine,
                                         autoflush=False,
                                         expire_on_commit=False)()
    else:
        saveddata_meta = None

    # Lock controlling any changes introduced to session
    sd_lock = threading.Lock()

    # Import all the definitions for all our database stuff
    # noinspection PyPep8
    #from eos.db.gamedata import alphaClones, attribute, category, effect, group, icon, item, marketGroup, metaData, metaGroup, queries, traits, unit
    # noinspection PyPep8
    #from eos.db.saveddata import booster, cargo, character, crest, damagePattern, databaseRepair, drone, fighter, fit, implant, implantSet, loadDefaultDatabaseValues, miscData, module, override, price, queries, skill, targetResists, user

    # If using in memory saveddata, you'll want to reflect it so the data structure is good.
    if saveddata_connectionstring == "sqlite:///:memory:":
        saveddata_meta.create_all()

    # Output debug info to help us troubleshoot Travis
    print(saveddata_engine)
    print(gamedata_engine)

    helper = {
        #'config': eos.config,
        'gamedata_session': gamedata_session,
        'saveddata_session': saveddata_session,
    }
    return helper
Example #32
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    alarm = Table('alarm', meta, autoload=True)
    alarm.c.meter_name.alter(name='counter_name')
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    agents = Table(
        'agents',
        meta,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('created_at', DateTime),
        Column('project_id', String(length=255)),
        Column('updated_at', DateTime),
        Column('user_id', String(length=255)),
        Column('uuid', String(length=255), unique=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    queue_members = Table(
        'queue_members',
        meta,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('created_at', DateTime),
        Column('agent_uuid', String(255)),
        Column('queue_uuid', String(255)),
        Column('updated_at', DateTime),
        ForeignKeyConstraint(
            ['agent_uuid'],
            ['agents.uuid'],
        ),
        ForeignKeyConstraint(
            ['queue_uuid'],
            ['queues.uuid'],
        ),
        UniqueConstraint('agent_uuid',
                         'queue_uuid',
                         name='uniq_queue_members0agent_uuid0queue_uuid'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    queues = Table(
        'queues',
        meta,
        Column('id', Integer, primary_key=True, autoincrement=True),
        Column('created_at', DateTime),
        Column('description', Text),
        Column('disabled', Boolean),
        Column('name', String(length=80)),
        Column('project_id', String(length=255)),
        Column('updated_at', DateTime),
        Column('user_id', String(length=255)),
        Column('uuid', String(length=255), unique=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    tables = [agents, queues, queue_members]

    for table in tables:
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            LOG.exception('Exception while creating table.')
            raise
def upgrade(migrate_engine):
    """This database upgrade will change the controllerfs table to now have
       one row per filesystem.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    controller_fs = Table('controller_fs', meta, autoload=True)

    # Create new columns
    controller_fs.create_column(Column('name', String(64)))
    controller_fs.create_column(Column('size', Integer))
    controller_fs.create_column(Column('logical_volume', String(64)))
    controller_fs.create_column(Column('replicated', Boolean, default=False))

    # Get the first row
    fs = list(controller_fs.select().where(
        controller_fs.c.uuid.isnot(None)).execute())

    if len(fs) > 0:
        # If there is data in the table then migrate it
        database_gib = fs[0].database_gib
        cgcs_gib = fs[0].cgcs_gib
        img_conversions_gib = fs[0].img_conversions_gib
        backup_gib = fs[0].backup_gib
        scratch_gib = fs[0].scratch_gib
        forisystemid = fs[0].forisystemid

        LOG.info("Migrate the controllerfs table, database_gib=%s, "
                 "cgcs_gib=%s, img_conversions_gib=%s, backup_gib=%s, "
                 "scratch_gib=%s" %
                 (database_gib, cgcs_gib, img_conversions_gib, backup_gib,
                  scratch_gib))

        # Delete the original row
        controller_fs_delete = controller_fs.delete(
            controller_fs.c.uuid.isnot(None))
        controller_fs_delete.execute()

        # Add the new rows
        if backup_gib > 0:
            controller_fs_insert = controller_fs.insert()  # pylint: disable=no-value-for-parameter
            controller_fs_uuid = str(uuid.uuid4())
            values = {
                'created_at': datetime.now(),
                'updated_at': None,
                'deleted_at': None,
                'uuid': controller_fs_uuid,
                'name': 'backup',
                'size': backup_gib,
                'replicated': False,
                'logical_volume': 'backup-lv',
                'forisystemid': forisystemid,
            }
            controller_fs_insert.execute(values)

        if cgcs_gib > 0:
            controller_fs_insert = controller_fs.insert()  # pylint: disable=no-value-for-parameter
            controller_fs_uuid = str(uuid.uuid4())
            values = {
                'created_at': datetime.now(),
                'updated_at': None,
                'deleted_at': None,
                'uuid': controller_fs_uuid,
                'name': 'cgcs',
                'size': cgcs_gib,
                'replicated': True,
                'logical_volume': 'cgcs-lv',
                'forisystemid': forisystemid,
            }
            controller_fs_insert.execute(values)

        if database_gib > 0:
            controller_fs_insert = controller_fs.insert()  # pylint: disable=no-value-for-parameter
            controller_fs_uuid = str(uuid.uuid4())
            values = {
                'created_at': datetime.now(),
                'updated_at': None,
                'deleted_at': None,
                'uuid': controller_fs_uuid,
                'name': 'database',
                'size': database_gib,
                'replicated': True,
                'logical_volume': 'pgsql-lv',
                'forisystemid': forisystemid,
            }
            controller_fs_insert.execute(values)

        if scratch_gib > 0:
            controller_fs_insert = controller_fs.insert()  # pylint: disable=no-value-for-parameter
            controller_fs_uuid = str(uuid.uuid4())
            values = {
                'created_at': datetime.now(),
                'updated_at': None,
                'deleted_at': None,
                'uuid': controller_fs_uuid,
                'name': 'scratch',
                'size': scratch_gib,
                'replicated': False,
                'logical_volume': 'scratch-lv',
                'forisystemid': forisystemid,
            }
            controller_fs_insert.execute(values)

        if img_conversions_gib > 0:
            controller_fs_insert = controller_fs.insert()  # pylint: disable=no-value-for-parameter
            controller_fs_uuid = str(uuid.uuid4())
            values = {
                'created_at': datetime.now(),
                'updated_at': None,
                'deleted_at': None,
                'uuid': controller_fs_uuid,
                'name': 'img-conversions',
                'size': img_conversions_gib,
                'replicated': False,
                'logical_volume': 'img-conversions-lv',
                'forisystemid': forisystemid,
            }
            controller_fs_insert.execute(values)

    # Drop the old columns
    controller_fs.drop_column('database_gib')
    controller_fs.drop_column('cgcs_gib')
    controller_fs.drop_column('img_conversions_gib')
    controller_fs.drop_column('backup_gib')
    controller_fs.drop_column('scratch_gib')
Example #35
    def metadata(self):
        m = MetaData()
        m.bind = self.engine
        return m
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    build_requests = Table('build_requests', meta, autoload=True)
    build_requests.c.instance.alter(type=api_models.MediumText())
Example #37
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # NOTE(dprince): The old dns_domains table is in the 'latin1'
    # charset and had its primary key length set to 512.
    # This is too long to be a valid pkey in the 'utf8' table charset
    # and is the root cause of errors like:
    #
    # 1) Dumping a database with mysqldump and trying to import it fails
    #    because this table is latin1 but fkeys to utf8 tables (projects).
    #
    # 2) Trying to alter the old dns_domains table fails with errors like:
    #    mysql> ALTER TABLE dns_domains DROP PRIMARY KEY;
    #    ERROR 1025 (HY000): Error on rename of './nova/#sql-6cf_855'....
    #
    # In short this table is just in a bad state. So... lets create a new one
    # with a shorter 'domain' column which is valid for the utf8 charset.
    # https://bugs.launchpad.net/nova/+bug/993663

    # Rename the old table
    dns_domains_old = Table('dns_domains', meta, autoload=True)
    dns_domains_old.rename(name='dns_domains_old')

    # NOTE(dprince): manually remove pkey/fkey for postgres
    if migrate_engine.name == "postgresql":
        sql = """ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
              dns_domains_pkey;
              ALTER TABLE ONLY dns_domains_old DROP CONSTRAINT
              dns_domains_project_id_fkey;"""
        migrate_engine.execute(sql)

    # Bind new metadata to avoid issues after the rename
    meta = MetaData()
    meta.bind = migrate_engine
    projects = Table('projects', meta, autoload=True)  # Required for fkey

    dns_domains_new = Table(
        'dns_domains',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('domain', String(length=255), nullable=False, primary_key=True),
        Column('scope', String(length=255)),
        Column('availability_zone', String(length=255)),
        Column('project_id', String(length=255), ForeignKey('projects.id')),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    dns_domains_new.create()

    dns_domains_old = Table('dns_domains_old', meta, autoload=True)
    record_list = list(dns_domains_old.select().execute())
    for rec in record_list:
        row = dns_domains_new.insert()
        row.execute({
            'created_at': rec['created_at'],
            'updated_at': rec['updated_at'],
            'deleted_at': rec['deleted_at'],
            'deleted': rec['deleted'],
            'domain': rec['domain'],
            'scope': rec['scope'],
            'availability_zone': rec['availability_zone'],
            'project_id': rec['project_id'],
        })

    dns_domains_old.drop()
Example #38
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    Table('i_system',
          meta,
          Column('id', Integer, primary_key=True, nullable=False),
          mysql_engine=ENGINE,
          mysql_charset=CHARSET)

    i_host = Table('i_host',
                   meta,
                   Column('id', Integer, primary_key=True, nullable=False),
                   mysql_engine=ENGINE,
                   mysql_charset=CHARSET,
                   autoload=True)

    if migrate_engine.url.get_dialect() is postgresql.dialect:
        old_provisionEnum = Enum('unprovisioned',
                                 'inventoried',
                                 'configured',
                                 'provisioned',
                                 'reserve1',
                                 'reserve2',
                                 name='invprovisionStateEnum')

        provisionEnum = Enum('unprovisioned',
                             'inventoried',
                             'configured',
                             'provisioning',
                             'provisioned',
                             'reserve1',
                             'reserve2',
                             name='invprovisionStateEnum')

        inv_provision_col = i_host.c.invprovision
        inv_provision_col.alter(Column('invprovision', String(60)))
        old_provisionEnum.drop(bind=migrate_engine, checkfirst=False)
        provisionEnum.create(bind=migrate_engine, checkfirst=False)
        migrate_engine.execute(
            'ALTER TABLE i_host ALTER COLUMN invprovision TYPE "invprovisionStateEnum" '
            'USING invprovision::text::"invprovisionStateEnum"')

    Table('i_node',
          meta,
          Column('id', Integer, primary_key=True, nullable=False),
          mysql_engine=ENGINE,
          mysql_charset=CHARSET)

    i_alarm_history = Table(
        'i_alarm_history',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(255), unique=True, index=True),
        Column('alarm_id', String(255), index=True),
        Column('alarm_state', String(255)),
        Column('entity_type_id', String(255), index=True),
        Column('entity_instance_id', String(255), index=True),
        Column('timestamp', DateTime(timezone=False)),
        Column('severity', String(255), index=True),
        Column('reason_text', String(255)),
        Column('alarm_type', String(255), index=True),
        Column('probable_cause', String(255)),
        Column('proposed_repair_action', String(255)),
        Column('service_affecting', Boolean),
        Column('suppression', Boolean),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    i_alarm_history.create()

    i_customer_log = Table(
        'i_customer_log',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(255), unique=True, index=True),
        Column('log_id', String(255), index=True),
        Column('entity_type_id', String(255), index=True),
        Column('entity_instance_id', String(255), index=True),
        Column('timestamp', DateTime(timezone=False)),
        Column('severity', String(255), index=True),
        Column('reason_text', String(255)),
        Column('log_type', String(255), index=True),
        Column('probable_cause', String(255)),
        Column('service_affecting', Boolean),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    i_customer_log.create()

    i_infra = Table(
        'i_infra',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('infra_subnet', String(255)),
        Column('infra_start', String(255)),
        Column('infra_end', String(255)),
        Column('forisystemid', Integer,
               ForeignKey('i_system.id', ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    i_infra.create()

    interfaces = Table(
        'interfaces',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('forihostid', Integer,
               ForeignKey('i_host.id', ondelete='CASCADE')),
        Column('iftype', String(255)),
        Column('ifname', String(255)),
        Column('networktype', String(255)),
        Column('sriov_numvfs', Integer),
        Column('ifcapabilities', Text),
        Column('farend', Text),
        UniqueConstraint('ifname', 'forihostid', name='u_interfacenameihost'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    interfaces.create()

    interfaces_to_interfaces = Table(
        'interfaces_to_interfaces',
        meta,
        Column("used_by_id",
               Integer,
               ForeignKey("interfaces.id", ondelete='CASCADE'),
               primary_key=True),
        Column("uses_id",
               Integer,
               ForeignKey("interfaces.id", ondelete='CASCADE'),
               primary_key=True),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    interfaces_to_interfaces.create()

    ethernet_interfaces = Table(
        'ethernet_interfaces',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('interfaces.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        Column('imac', String(255)),
        Column('imtu', Integer),
        Column('providernetworks', String(255)),
        Column('providernetworksdict', Text),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    ethernet_interfaces.create()

    ae_interfaces = Table(
        'ae_interfaces',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('interfaces.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        Column('aemode', String(255)),
        Column('aedict', Text),
        Column('txhashpolicy', String(255)),
        Column('schedpolicy', String(255)),
        Column('imac', String(255)),
        Column('imtu', Integer),
        Column('providernetworks', String(255)),
        Column('providernetworksdict', Text),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    ae_interfaces.create()

    vlan_interfaces = Table(
        'vlan_interfaces',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('interfaces.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        Column('vlan_id', String(255)),
        Column('vlan_type', String(255)),
        Column('imac', String(255)),
        Column('imtu', Integer),
        Column('providernetworks', String(255)),
        Column('providernetworksdict', Text),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    vlan_interfaces.create()

    ports = Table(
        'ports',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('host_id', Integer, ForeignKey('i_host.id',
                                              ondelete='CASCADE')),
        Column('node_id', Integer, ForeignKey('i_node.id',
                                              ondelete='SET NULL')),
        Column('interface_id', Integer,
               ForeignKey('interfaces.id', ondelete='SET NULL')),
        Column('type', String(255)),
        Column('name', String(255)),
        Column('namedisplay', String(255)),
        Column('pciaddr', String(255)),
        Column('dev_id', Integer),
        Column('sriov_totalvfs', Integer),
        Column('sriov_numvfs', Integer),
        Column('sriov_vfs_pci_address', String(1020)),
        Column('driver', String(255)),
        Column('pclass', String(255)),
        Column('pvendor', String(255)),
        Column('pdevice', String(255)),
        Column('psvendor', String(255)),
        Column('psdevice', String(255)),
        Column('dpdksupport', Boolean, default=False),
        Column('numa_node', Integer),
        Column('capabilities', Text),
        UniqueConstraint('pciaddr',
                         'dev_id',
                         'host_id',
                         name='u_pciaddr_dev_host_id'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    ports.create()

    ethernet_ports = Table(
        'ethernet_ports',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('ports.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        Column('mac', String(255)),
        Column('mtu', Integer),
        Column('speed', Integer),
        Column('link_mode', String(255)),
        Column('duplex', String(255)),
        Column('autoneg', String(255)),
        Column('bootp', String(255)),
        Column('capabilities', Text),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    ethernet_ports.create()

    address_pools = Table(
        'address_pools',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('name', String(128), unique=True, nullable=False),
        Column('family', Integer, nullable=False),
        Column('network', String(50), nullable=False),
        Column('prefix', Integer, nullable=False),
        Column('order', String(32), nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    address_pools.create()

    address_pool_ranges = Table(
        'address_pool_ranges',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('start', String(50), nullable=False),
        Column('end', String(50), nullable=False),
        Column('address_pool_id',
               Integer,
               ForeignKey('address_pools.id', ondelete="CASCADE"),
               nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    address_pool_ranges.create()

    addresses = Table(
        'addresses',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('name', String(255)),
        Column('family', Integer, nullable=False),
        Column('address', String(50), nullable=False),
        Column('prefix', Integer, nullable=False),
        Column('enable_dad', Boolean(), default=True),
        Column('interface_id',
               Integer,
               ForeignKey('interfaces.id', ondelete="CASCADE"),
               nullable=True),
        Column('address_pool_id',
               Integer,
               ForeignKey('address_pools.id', ondelete="CASCADE"),
               nullable=True),
        UniqueConstraint('family',
                         'address',
                         'interface_id',
                         name='u_address@family@interface'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    addresses.create()

    address_modes = Table(
        'address_modes',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('family', Integer, nullable=False),
        Column('mode', String(32), nullable=False),
        Column('interface_id',
               Integer,
               ForeignKey('interfaces.id', ondelete="CASCADE"),
               nullable=False),
        Column('address_pool_id',
               Integer,
               ForeignKey('address_pools.id', ondelete="CASCADE"),
               nullable=True),
        UniqueConstraint('family', 'interface_id', name='u_family@interface'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    address_modes.create()

    routes = Table(
        'routes',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('family', Integer, nullable=False),
        Column('network', String(50), nullable=False),
        Column('prefix', Integer, nullable=False),
        Column('gateway', String(50), nullable=False),
        Column('metric', Integer, default=1, nullable=False),
        Column('interface_id',
               Integer,
               ForeignKey('interfaces.id', ondelete="CASCADE"),
               nullable=False),
        UniqueConstraint('family',
                         'network',
                         'prefix',
                         'gateway',
                         'interface_id',
                         name='u_family@network@prefix@gateway@host'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    routes.create()

    networks = Table(
        'networks',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('type', String(255), unique=True),
        Column('mtu', Integer, nullable=False),
        Column('link_capacity', Integer),
        Column('dynamic', Boolean, nullable=False),
        Column('vlan_id', Integer),
        Column('address_pool_id',
               Integer,
               ForeignKey('address_pools.id', ondelete='CASCADE'),
               nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    networks.create()

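    # NOTE: Table(..., autoload=True) reflects the existing table from the
    # live database; create_column() is added by sqlalchemy-migrate's
    # changeset extension and issues ALTER TABLE ... ADD COLUMN.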
    i_port = Table('i_port', meta, autoload=True)
    i_port.create_column(Column('sriov_totalvfs', Integer))
    i_port.create_column(Column('sriov_numvfs', Integer))
    i_port.create_column(Column('sriov_vfs_pci_address', String(1020)))
    i_port.create_column(Column('driver', String(255)))
    i_interface = Table('i_interface', meta, autoload=True)
    i_interface.create_column(Column('sriov_numvfs', Integer))

    i_port = Table('i_port', meta, autoload=True)
    i_port.create_column(Column('dpdksupport', Boolean, default=False))

    i_interface = Table('i_interface', meta, autoload=True)
    i_interface.create_column(Column('aedict', Text))

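    # NOTE: with native_enum=False these enums are stored as VARCHAR plus a
    # CHECK constraint rather than a backend-native ENUM type.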
    pvTypeEnum = Enum('disk',
                      'partition',
                      'reserve1',
                      'reserve2',
                      native_enum=False,
                      name='physicalVolTypeEnum')

    pvStateEnum = Enum('unprovisioned',
                       'adding',
                       'provisioned',
                       'removing',
                       'reserve1',
                       'reserve2',
                       native_enum=False,
                       name='pvStateEnum')

    vgStateEnum = Enum('unprovisioned',
                       'adding',
                       'provisioned',
                       'removing',
                       'reserve1',
                       'reserve2',
                       native_enum=False,
                       name='vgStateEnum')

    i_lvg = Table(
        'i_lvg',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('vg_state', vgStateEnum, default="unprovisioned"),
        Column('lvm_vg_name', String(64)),
        Column('lvm_vg_uuid', String(64)),
        Column('lvm_vg_access', String(64)),
        Column('lvm_max_lv', Integer),
        Column('lvm_cur_lv', Integer),
        Column('lvm_max_pv', Integer),
        Column('lvm_cur_pv', Integer),
        Column('lvm_vg_size', BigInteger),
        Column('lvm_vg_total_pe', Integer),
        Column('lvm_vg_free_pe', Integer),
        Column('capabilities', Text),
        Column('forihostid', Integer,
               ForeignKey('i_host.id', ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    i_lvg.create()

    i_pv = Table(
        'i_pv',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('pv_state', pvStateEnum, default="unprovisioned"),
        Column('pv_type', pvTypeEnum, default="disk"),
        Column('idisk_uuid', String()),
        Column('idisk_device_node', String(64)),
        Column('lvm_pv_name', String(64)),
        Column('lvm_vg_name', String(64)),
        Column('lvm_pv_uuid', String(64)),
        Column('lvm_pv_size', BigInteger),
        Column('lvm_pe_total', Integer),
        Column('lvm_pe_alloced', Integer),
        Column('capabilities', Text),
        Column('forihostid', Integer,
               ForeignKey('i_host.id', ondelete='CASCADE')),
        Column('forilvgid', Integer, ForeignKey('i_lvg.id',
                                                ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    i_pv.create()

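    # NOTE: Column.create(table) is likewise a sqlalchemy-migrate extension;
    # it alters the reflected i_idisk table to add the foripvid FK column.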
    i_idisk = Table('i_idisk', meta, autoload=True)
    foripvid = Column('foripvid', Integer, ForeignKey('i_pv.id'))
    foripvid.create(i_idisk)

    sensorgroups = Table(
        'i_sensorgroups',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('host_id', Integer, ForeignKey('i_host.id',
                                              ondelete='CASCADE')),
        Column('sensorgroupname', String(255)),
        Column('path', String(255)),
        Column('datatype', String(255)),  # polymorphic 'analog'/'discrete'
        Column('sensortype', String(255)),
        Column('description', String(255)),
        Column('state', String(255)),  # enabled or disabled
        Column('possible_states', String(255)),
        Column('audit_interval_group', Integer),
        Column('record_ttl', Integer),
        Column('algorithm', String(255)),
        Column('actions_critical_choices', String(255)),
        Column('actions_major_choices', String(255)),
        Column('actions_minor_choices', String(255)),
        Column('actions_minor_group', String(255)),
        Column('actions_major_group', String(255)),
        Column('actions_critical_group', String(255)),
        Column('suppress', Boolean),  # True, disables the action
        Column('capabilities', Text),
        UniqueConstraint('sensorgroupname',
                         'path',
                         'host_id',
                         name='u_sensorgroupname_path_hostid'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    sensorgroups.create()

    # polymorphic on datatype 'discrete'
    sensorgroups_discrete = Table(
        'i_sensorgroups_discrete',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('i_sensorgroups.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    sensorgroups_discrete.create()

    # polymorphic on datatype 'analog'
    sensorgroups_analog = Table(
        'i_sensorgroups_analog',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('i_sensorgroups.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        Column('unit_base_group', String(255)),  # revolutions
        Column('unit_modifier_group', String(255)),  # 100
        Column('unit_rate_group', String(255)),  # minute
        Column('t_minor_lower_group', String(255)),
        Column('t_minor_upper_group', String(255)),
        Column('t_major_lower_group', String(255)),
        Column('t_major_upper_group', String(255)),
        Column('t_critical_lower_group', String(255)),
        Column('t_critical_upper_group', String(255)),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    sensorgroups_analog.create()

    sensors = Table(
        'i_sensors',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('host_id', Integer, ForeignKey('i_host.id',
                                              ondelete='CASCADE')),
        Column('sensorgroup_id', Integer,
               ForeignKey('i_sensorgroups.id', ondelete='SET NULL')),
        Column('sensorname', String(255)),
        Column('path', String(255)),
        Column('datatype', String(255)),  # polymorphic on datatype
        Column('sensortype', String(255)),
        Column('status', String(255)),  # ok, minor, major, critical, disabled
        Column('state', String(255)),  # enabled, disabled
        Column('state_requested', String(255)),
        Column('sensor_action_requested', String(255)),
        Column('audit_interval', Integer),
        Column('algorithm', String(255)),
        Column('actions_minor', String(255)),
        Column('actions_major', String(255)),
        Column('actions_critical', String(255)),
        Column('suppress', Boolean),  # True, disables the action
        Column('capabilities', Text),
        UniqueConstraint('sensorname',
                         'path',
                         'host_id',
                         name='u_sensorname_path_host_id'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    sensors.create()

    # discrete sensor
    sensors_discrete = Table(
        'i_sensors_discrete',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('i_sensors.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    sensors_discrete.create()

    # analog sensor
    sensors_analog = Table(
        'i_sensors_analog',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('i_sensors.id', ondelete="CASCADE"),
               primary_key=True,
               nullable=False),
        Column('unit_base', String(255)),  # revolutions
        Column('unit_modifier', String(255)),  # 10^2
        Column('unit_rate', String(255)),  # minute
        Column('t_minor_lower', String(255)),
        Column('t_minor_upper', String(255)),
        Column('t_major_lower', String(255)),
        Column('t_major_upper', String(255)),
        Column('t_critical_lower', String(255)),
        Column('t_critical_upper', String(255)),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    sensors_analog.create()

    pci_devices = Table(
        'pci_devices',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(255), unique=True, index=True),
        Column('host_id', Integer, ForeignKey('i_host.id',
                                              ondelete='CASCADE')),
        Column('name', String(255)),
        Column('pciaddr', String(255)),
        Column('pclass_id', String(6)),
        Column('pvendor_id', String(4)),
        Column('pdevice_id', String(4)),
        Column('pclass', String(255)),
        Column('pvendor', String(255)),
        Column('pdevice', String(255)),
        Column('psvendor', String(255)),
        Column('psdevice', String(255)),
        Column('numa_node', Integer),
        Column('driver', String(255)),
        Column('sriov_totalvfs', Integer),
        Column('sriov_numvfs', Integer),
        Column('sriov_vfs_pci_address', String(1020)),
        Column('enabled', Boolean),
        Column('extra_info', Text),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    pci_devices.create()

    loads = Table(
        'loads',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36)),
        Column('state', String(255)),
        Column('software_version', String(255)),
        Column('compatible_version', String(255)),
        Column('required_patches', String(2047)),
        UniqueConstraint('software_version'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    loads.create()

    # loads = Table('loads', meta, Column('id', Integer, primary_key=True,
    #                                     nullable=False))
    software_upgrade = Table(
        'software_upgrade',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('state', String(128), nullable=False),
        Column('from_load',
               Integer,
               ForeignKey('loads.id', ondelete="CASCADE"),
               nullable=False),
        Column('to_load',
               Integer,
               ForeignKey('loads.id', ondelete="CASCADE"),
               nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    software_upgrade.create()

    host_upgrade = Table(
        'host_upgrade',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('forihostid', Integer,
               ForeignKey('i_host.id', ondelete='CASCADE')),
        Column('software_load',
               Integer,
               ForeignKey('loads.id'),
               nullable=False),
        Column('target_load', Integer, ForeignKey('loads.id'), nullable=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    host_upgrade.create()

    drbdconfig = Table(
        'drbdconfig',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('link_util', Integer),
        Column('num_parallel', Integer),
        Column('rtt_ms', Float),
        Column('forisystemid', Integer,
               ForeignKey('i_system.id', ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    drbdconfig.create()

    i_host.create_column(Column('ihost_action', String(255)))
    i_host.create_column(Column('vim_progress_status', String(255)))
    i_host.create_column(Column('subfunctions', String(255)))
    i_host.create_column(
        Column('subfunction_oper', String(255), default="disabled"))
    i_host.create_column(
        Column('subfunction_avail', String(255), default="not-installed"))
    i_host.create_column(Column('boot_device', String(255)))
    i_host.create_column(Column('rootfs_device', String(255)))
    i_host.create_column(Column('install_output', String(255)))
    i_host.create_column(Column('console', String(255)))
    i_host.create_column(Column('vsc_controllers', String(255)))
    i_host.create_column(Column('ttys_dcd', Boolean))

    # 005_add_hugepage_attributes.py
    i_memory = Table('i_imemory', meta, autoload=True)
    i_memory.drop_column('vm_hugepages_size_mib')
    i_memory.drop_column('vm_hugepages_nr')
    i_memory.drop_column('vm_hugepages_avail')

    i_memory.create_column(Column('vm_hugepages_nr_2M', Integer))
    i_memory.create_column(Column('vm_hugepages_nr_1G', Integer))
    i_memory.create_column(Column('vm_hugepages_use_1G', Boolean))
    i_memory.create_column(Column('vm_hugepages_possible_2M', Integer))
    i_memory.create_column(Column('vm_hugepages_possible_1G', Integer))
    # 012_hugepage_enhancements.py
    i_memory.create_column(Column('vm_hugepages_nr_2M_pending', Integer))
    i_memory.create_column(Column('vm_hugepages_nr_1G_pending', Integer))
    i_memory.create_column(Column('vm_hugepages_avail_2M', Integer))
    i_memory.create_column(Column('vm_hugepages_avail_1G', Integer))
    # 014_hugepage_4K_memory.py
    i_memory.create_column(Column('vm_hugepages_nr_4K', Integer))
    # 016_compute_memory.py
    i_memory.create_column(Column('node_memtotal_mib', Integer))

    i_extoam = Table('i_extoam', meta, autoload=True)
    i_extoam.create_column(Column('oam_start_ip', String(255)))
    i_extoam.create_column(Column('oam_end_ip', String(255)))

    i_storconfig = Table('i_storconfig', meta, autoload=True)
    i_storconfig.create_column(Column('glance_backend', String(255)))
    i_storconfig.create_column(Column('glance_gib', Integer, default=0))
    i_storconfig.create_column(Column('img_conversions_gib', String(255)))

    table_names = ['i_extoam', 'i_infra']
    for name in table_names:
        table = Table(name, meta, autoload=True)
        table.drop()

    serviceEnum = Enum('identity', name='serviceEnum')

    service_parameter = Table(
        'service_parameter',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('service', serviceEnum),
        Column('section', String(255)),
        Column('name', String(255)),
        Column('value', String(255)),
        UniqueConstraint('service',
                         'section',
                         'name',
                         name='u_servicesectionname'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    service_parameter.create()
Example #39
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    # grab tables
    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    # all of these functions return a python list of python dicts
    # that have nothing to do with sqlalchemy objects whatsoever
    # after returning
    def get_instances():
        # want all instances whether there is network info or not
        s = select([instances.c.id, instances.c.uuid])
        keys = ('id', 'uuid')

        return [dict(zip(keys, row)) for row in s.execute()]

    def get_vifs_by_instance_id(instance_id):
        s = select([vifs.c.id, vifs.c.uuid, vifs.c.address, vifs.c.network_id],
                   vifs.c.instance_id == instance_id)
        keys = ('id', 'uuid', 'address', 'network_id')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_network_by_id(network_id):
        s = select([
            networks.c.uuid, networks.c.label, networks.c.project_id,
            networks.c.dns1, networks.c.dns2, networks.c.cidr,
            networks.c.cidr_v6, networks.c.gateway, networks.c.gateway_v6,
            networks.c.injected, networks.c.multi_host, networks.c.bridge,
            networks.c.bridge_interface, networks.c.vlan
        ], networks.c.id == network_id)
        keys = ('uuid', 'label', 'project_id', 'dns1', 'dns2', 'cidr',
                'cidr_v6', 'gateway', 'gateway_v6', 'injected', 'multi_host',
                'bridge', 'bridge_interface', 'vlan')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_fixed_ips_by_vif_id(vif_id):
        s = select([fixed_ips.c.id, fixed_ips.c.address],
                   fixed_ips.c.virtual_interface_id == vif_id)
        keys = ('id', 'address')
        fixed_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        # fixed ips have floating ips, so here they are
        for fixed_ip in fixed_ip_list:
            fixed_ip['version'] = 4
            fixed_ip['floating_ips'] = get_floating_ips_by_fixed_ip_id(
                fixed_ip['id'])
            fixed_ip['type'] = 'fixed'
            del fixed_ip['id']

        return fixed_ip_list

    def get_floating_ips_by_fixed_ip_id(fixed_ip_id):
        s = select([floating_ips.c.address],
                   floating_ips.c.fixed_ip_id == fixed_ip_id)
        keys = ('address',)
        floating_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        for floating_ip in floating_ip_list:
            floating_ip['version'] = 4
            floating_ip['type'] = 'floating'

        return floating_ip_list

    def _ip_dict_from_string(ip_string, type):
        if ip_string:
            ip = {'address': ip_string, 'type': type}
            if ':' in ip_string:
                ip['version'] = 6
            else:
                ip['version'] = 4

            return ip

    def _get_fixed_ipv6_dict(cidr, mac, project_id):
        ip_string = ipv6.to_global(cidr, mac, project_id)
        return {'version': 6, 'address': ip_string, 'floating_ips': []}

    def _create_subnet(version, network, vif):
        if version == 4:
            cidr = network['cidr']
            gateway = network['gateway']
            ips = get_fixed_ips_by_vif_id(vif['id'])
        else:
            cidr = network['cidr_v6']
            gateway = network['gateway_v6']
            ips = [
                _get_fixed_ipv6_dict(network['cidr_v6'], vif['address'],
                                     network['project_id'])
            ]

        # NOTE(tr3buchet) routes is left empty for now because there
        # is no good way to generate them or determine which is default
        subnet = {
            'version': version,
            'cidr': cidr,
            'dns': [],
            'gateway': _ip_dict_from_string(gateway, 'gateway'),
            'routes': [],
            'ips': ips
        }

        if network['dns1'] and network['dns1']['version'] == version:
            subnet['dns'].append(network['dns1'])
        if network['dns2'] and network['dns2']['version'] == version:
            subnet['dns'].append(network['dns2'])

        return subnet

    def _update_network(vif, network):
        # vifs have a network which has subnets, so create the subnets
        # subnets contain all of the ip information
        network['subnets'] = []

        network['dns1'] = _ip_dict_from_string(network['dns1'], 'dns')
        network['dns2'] = _ip_dict_from_string(network['dns2'], 'dns')

        # nova networks can only have 2 subnets
        if network['cidr']:
            network['subnets'].append(_create_subnet(4, network, vif))
        if network['cidr_v6']:
            network['subnets'].append(_create_subnet(6, network, vif))

        # put network together to fit model
        network['id'] = network.pop('uuid')
        network['meta'] = {}

        # NOTE(tr3buchet) this isn't absolutely necessary as hydration
        # would still work with these as keys, but cache generated by
        # the model would show these keys as a part of meta. i went
        # ahead and set it up the same way just so it looks the same
        if network['project_id']:
            network['meta']['project_id'] = network['project_id']
        del network['project_id']
        if network['injected']:
            network['meta']['injected'] = network['injected']
        del network['injected']
        if network['multi_host']:
            network['meta']['multi_host'] = network['multi_host']
        del network['multi_host']
        if network['bridge_interface']:
            network['meta']['bridge_interface'] = network['bridge_interface']
        del network['bridge_interface']
        if network['vlan']:
            network['meta']['vlan'] = network['vlan']
        del network['vlan']

        # ip information now lives in the subnet, pull them out of network
        del network['dns1']
        del network['dns2']
        del network['cidr']
        del network['cidr_v6']
        del network['gateway']
        del network['gateway_v6']

        # don't need meta if it's empty
        if not network['meta']:
            del network['meta']

    # preload caches table
    # list is made up of a row(instance_id, nw_info_json) for each instance
    for instance in get_instances():
        LOG.info("Updating %s" % (instance['uuid']))
        instance_id = instance['id']
        instance_uuid = instance['uuid']

        # instances have vifs, so an instance's nw_info
        # is a list of dicts, 1 dict for each vif
        nw_info = get_vifs_by_instance_id(instance_id)
        LOG.info("VIFs for Instance %s: \n %s" % (instance['uuid'], nw_info))
        for vif in nw_info:
            networks_ = get_network_by_id(vif['network_id'])
            if networks_:
                network = networks_[0]
                LOG.info("Network for Instance %s: \n %s" %
                         (instance['uuid'], network))
                _update_network(vif, network)
            else:
                network = None

            # put vif together to fit model
            del vif['network_id']
            vif['id'] = vif.pop('uuid')
            vif['network'] = network
            # vif['meta'] could also be set to contain rxtx data here
            # but it isn't exposed in the api and is still being rewritten

            LOG.info("VIF network for instance %s: \n %s" %
                     (instance['uuid'], vif['network']))

        # jsonify nw_info
        row = {
            'created_at': utils.utcnow(),
            'updated_at': utils.utcnow(),
            'instance_id': instance_uuid,
            'network_info': json.dumps(nw_info)
        }

        # write row to table
        insert = instance_info_caches.insert().values(**row)
        migrate_engine.execute(insert)
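# For orientation, a minimal sketch (hypothetical values) of the nw_info
# structure the loop above serializes into instance_info_caches.network_info;
# the shape follows _update_network and _create_subnet:
import json

nw_info = [{
    'id': '11111111-2222-3333-4444-555555555555',      # vif uuid
    'address': 'aa:bb:cc:dd:ee:ff',                    # vif mac
    'network': {
        'id': '66666666-7777-8888-9999-aaaaaaaaaaaa',  # network uuid
        'label': 'private',
        'subnets': [{
            'version': 4,
            'cidr': '10.0.0.0/24',
            'dns': [{'address': '8.8.8.8', 'type': 'dns', 'version': 4}],
            'gateway': {'address': '10.0.0.1', 'type': 'gateway',
                        'version': 4},
            'routes': [],
            'ips': [{'address': '10.0.0.2', 'type': 'fixed', 'version': 4,
                     'floating_ips': []}],
        }],
    },
}]
print(json.dumps(nw_info, indent=2))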
Example #40
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    groups = Table(
        'instance_groups',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('uuid', String(length=36), nullable=False),
        Column('name', String(length=255)),
        UniqueConstraint('uuid',
                         'deleted',
                         name='uniq_instance_groups0uuid0deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_metadata = Table(
        'instance_group_metadata',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        Column('group_id',
               Integer,
               ForeignKey('instance_groups.id'),
               nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_policy = Table(
        'instance_group_policy',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('policy', String(length=255)),
        Column('group_id',
               Integer,
               ForeignKey('instance_groups.id'),
               nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_member = Table(
        'instance_group_member',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_id', String(length=255)),
        Column('group_id',
               Integer,
               ForeignKey('instance_groups.id'),
               nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    tables = [groups, group_metadata, group_policy, group_member]

    # create all of the tables
    for table in tables:
        table.create()
        utils.create_shadow_table(migrate_engine, table=table)

    indexes = [
        Index('instance_group_metadata_key_idx', group_metadata.c.key),
        Index('instance_group_member_instance_idx',
              group_member.c.instance_id),
        Index('instance_group_policy_policy_idx', group_policy.c.policy)
    ]

    # Common indexes
    if migrate_engine.name == 'mysql' or migrate_engine.name == 'postgresql':
        for index in indexes:
            index.create(migrate_engine)
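# A minimal downgrade sketch, assuming the usual sqlalchemy-migrate pairing
# and nova's 'shadow_' prefix for the tables made by create_shadow_table:
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    # Children first so the instance_groups FKs don't block the drops.
    for name in ('instance_group_member', 'instance_group_policy',
                 'instance_group_metadata', 'instance_groups'):
        Table(name, meta, autoload=True).drop()
        Table('shadow_' + name, meta, autoload=True).drop()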
Example #41
0
 def test_create_shadow_both_table_and_table_name_are_none(self):
     for key, engine in self.engines.items():
         meta = MetaData()
         meta.bind = engine
         self.assertRaises(exception.NovaException,
                           utils.create_shadow_table, engine)
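 # For contrast, a hedged sketch of the non-error path; the table_name
 # keyword is assumed from this test's name and from the table= call in
 # Example #40 above:
 def test_create_shadow_table_by_name(self):
     for key, engine in self.engines.items():
         meta = MetaData()
         meta.bind = engine
         table = Table('tmp_example', meta,
                       Column('id', Integer, primary_key=True,
                              nullable=False),
                       Column('a', Integer))
         table.create()
         utils.create_shadow_table(engine, table_name='tmp_example')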
Example #42
0
import json

from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base

from config.database_setting import DB_CONFIG as DB_CONFIG

CONNECTION_URL = "mysql+pymysql://%s:%s@%s:%s/%s?charset=%s" % \
                 (
                     DB_CONFIG["username"], DB_CONFIG["password"], DB_CONFIG["host"],
                     DB_CONFIG["port"], DB_CONFIG["database"], DB_CONFIG["charset"]
                 )

engine = create_engine(
    CONNECTION_URL,
    json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False),
    pool_recycle=1000,
    pool_size=200)
metadata = MetaData()
metadata.bind = engine
Session = scoped_session(
    sessionmaker(autocommit=False,
                 autoflush=False,
                 expire_on_commit=False,
                 bind=engine))
Base = declarative_base()
Base.query = Session.query_property()


class OrmBase(object):
    def save(self, commit=True, refresh=False):
        session = Session()
        session.add(self)
        if commit:
            session.commit()
            if refresh:
                session.refresh(self)
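# A usage sketch (hypothetical User model) of the session plumbing above:
from sqlalchemy import Column, Integer, String

class User(Base, OrmBase):              # hypothetical model
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

Base.metadata.create_all(engine)        # create tables once
User(name='alice').save()               # OrmBase.save: add + commit
first = User.query.first()              # via Base.query = Session.query_property()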
Example #43
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    profile = Table('profile',
                    meta,
                    Column('id',
                           String(length=36),
                           primary_key=True,
                           nullable=False),
                    Column('name', String(length=255)),
                    Column('type', String(length=255)),
                    Column('context', types.Dict),
                    Column('spec', types.Dict),
                    Column('user', String(length=32), nullable=False),
                    Column('project', String(length=32), nullable=False),
                    Column('domain', String(length=32)),
                    Column('permission', String(length=32)),
                    Column('meta_data', types.Dict),
                    Column('created_at', DateTime),
                    Column('updated_at', DateTime),
                    mysql_engine='InnoDB',
                    mysql_charset='utf8')

    cluster = Table('cluster',
                    meta,
                    Column('id', String(36), primary_key=True, nullable=False),
                    Column('name', String(255), nullable=False),
                    Column('profile_id',
                           String(36),
                           ForeignKey('profile.id'),
                           nullable=False),
                    Column('user', String(32), nullable=False),
                    Column('project', String(32), nullable=False),
                    Column('domain', String(32)),
                    Column('parent', String(36)),
                    Column('init_at', DateTime),
                    Column('created_at', DateTime),
                    Column('updated_at', DateTime),
                    Column('min_size', Integer),
                    Column('max_size', Integer),
                    Column('desired_capacity', Integer),
                    Column('next_index', Integer),
                    Column('timeout', Integer),
                    Column('status', String(255)),
                    Column('status_reason', Text),
                    Column('meta_data', types.Dict),
                    Column('data', types.Dict),
                    mysql_engine='InnoDB',
                    mysql_charset='utf8')

    node = Table('node',
                 meta,
                 Column('id', String(36), primary_key=True, nullable=False),
                 Column('name', String(255)),
                 Column('physical_id', String(36)),
                 Column('cluster_id', String(36)),
                 Column('profile_id',
                        String(36),
                        ForeignKey('profile.id'),
                        nullable=False),
                 Column('user', String(32)),
                 Column('project', String(32)),
                 Column('domain', String(32)),
                 Column('index', Integer),
                 Column('role', String(64)),
                 Column('init_at', DateTime),
                 Column('created_at', DateTime),
                 Column('updated_at', DateTime),
                 Column('status', String(255)),
                 Column('status_reason', Text),
                 Column('meta_data', types.Dict),
                 Column('data', types.Dict),
                 mysql_engine='InnoDB',
                 mysql_charset='utf8')

    cluster_lock = Table('cluster_lock',
                         meta,
                         Column('cluster_id',
                                String(36),
                                primary_key=True,
                                nullable=False),
                         Column('action_ids', types.List),
                         Column('semaphore', Integer),
                         mysql_engine='InnoDB',
                         mysql_charset='utf8')

    node_lock = Table('node_lock',
                      meta,
                      Column('node_id',
                             String(36),
                             primary_key=True,
                             nullable=False),
                      Column('action_id', String(36)),
                      mysql_engine='InnoDB',
                      mysql_charset='utf8')

    policy = Table('policy',
                   meta,
                   Column('id', String(36), primary_key=True, nullable=False),
                   Column('name', String(255)),
                   Column('type', String(255)),
                   Column('user', String(32), nullable=False),
                   Column('project', String(32), nullable=False),
                   Column('domain', String(32)),
                   Column('cooldown', Integer),
                   Column('level', Integer),
                   Column('created_at', DateTime),
                   Column('updated_at', DateTime),
                   Column('spec', types.Dict),
                   Column('data', types.Dict),
                   mysql_engine='InnoDB',
                   mysql_charset='utf8')

    cluster_policy = Table('cluster_policy',
                           meta,
                           Column('id',
                                  String(36),
                                  primary_key=True,
                                  nullable=False),
                           Column('cluster_id',
                                  String(36),
                                  ForeignKey('cluster.id'),
                                  nullable=False),
                           Column('policy_id',
                                  String(36),
                                  ForeignKey('policy.id'),
                                  nullable=False),
                           Column('cooldown', Integer),
                           Column('priority', Integer),
                           Column('level', Integer),
                           Column('enabled', Boolean),
                           Column('data', types.Dict),
                           Column('last_op', DateTime),
                           mysql_engine='InnoDB',
                           mysql_charset='utf8')

    receiver = Table('receiver',
                     meta,
                     Column('id', String(36), primary_key=True,
                            nullable=False),
                     Column('name', String(255)),
                     Column('type', String(255)),
                     Column('user', String(32)),
                     Column('project', String(32)),
                     Column('domain', String(32)),
                     Column('created_at', DateTime),
                     Column('updated_at', DateTime),
                     Column('cluster_id', String(36)),
                     Column('actor', types.Dict),
                     Column('action', Text),
                     Column('params', types.Dict),
                     Column('channel', types.Dict),
                     mysql_engine='InnoDB',
                     mysql_charset='utf8')

    credential = Table('credential',
                       meta,
                       Column('user',
                              String(32),
                              primary_key=True,
                              nullable=False),
                       Column('project',
                              String(32),
                              primary_key=True,
                              nullable=False),
                       Column('cred', types.Dict, nullable=False),
                       Column('data', types.Dict),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8')

    action = Table(
        'action',
        meta,
        Column('id', String(36), primary_key=True, nullable=False),
        Column('name', String(63)),
        Column('context', types.Dict),
        Column('target', String(36)),
        Column('action', Text),
        Column('cause', String(255)),
        Column('owner', String(36)),
        Column('interval', Integer),
        # FIXME: Don't specify fixed precision.
        Column('start_time', Float(precision='24,8')),
        Column('end_time', Float(precision='24,8')),
        Column('timeout', Integer),
        Column('control', String(255)),
        Column('status', String(255)),
        Column('status_reason', Text),
        Column('inputs', types.Dict),
        Column('outputs', types.Dict),
        Column('depends_on', types.List),
        Column('depended_by', types.List),
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('data', types.Dict),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    dependency = Table('dependency',
                       meta,
                       Column('id',
                              String(36),
                              nullable=False,
                              primary_key=True),
                       Column('depended',
                              String(36),
                              ForeignKey('action.id'),
                              nullable=False),
                       Column('dependent',
                              String(36),
                              ForeignKey('action.id'),
                              nullable=False),
                       mysql_engine='InnoDB',
                       mysql_charset='utf8')

    event = Table('event',
                  meta,
                  Column('id', String(36), primary_key=True, nullable=False),
                  Column('timestamp', DateTime, nullable=False),
                  Column('obj_id', String(36)),
                  Column('obj_name', String(255)),
                  Column('obj_type', String(36)),
                  Column('cluster_id', String(36)),
                  Column('level', String(63)),
                  Column('user', String(32)),
                  Column('project', String(32)),
                  Column('action', String(36)),
                  Column('status', String(255)),
                  Column('status_reason', Text),
                  Column('meta_data', types.Dict),
                  mysql_engine='InnoDB',
                  mysql_charset='utf8')

    tables = (
        profile,
        cluster,
        node,
        cluster_lock,
        node_lock,
        policy,
        cluster_policy,
        credential,
        action,
        dependency,
        receiver,
        event,
    )

    for index, table in enumerate(tables):
        try:
            table.create()
        except Exception:
            # If an error occurs, drop all tables created so far to return
            # to the previously existing state.
            meta.drop_all(tables=tables[:index])
            raise
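# The matching teardown under the same assumptions, dropping in reverse
# creation order so FK parents (profile, action, ...) go last:
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    for name in ('event', 'receiver', 'dependency', 'action', 'credential',
                 'cluster_policy', 'policy', 'node_lock', 'cluster_lock',
                 'node', 'cluster', 'profile'):
        Table(name, meta, autoload=True).drop()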
Example #44
0
class ReadOnlyException(Exception):
    pass


pyfalog.debug('Initializing gamedata')
gamedata_connectionstring = config.gamedata_connectionstring
if callable(gamedata_connectionstring):
    gamedata_engine = create_engine("sqlite://",
                                    creator=gamedata_connectionstring,
                                    echo=config.debug)
else:
    gamedata_engine = create_engine(gamedata_connectionstring,
                                    echo=config.debug)

gamedata_meta = MetaData()
gamedata_meta.bind = gamedata_engine
gamedata_session = sessionmaker(bind=gamedata_engine,
                                autoflush=False,
                                expire_on_commit=False)()

pyfalog.debug('Getting gamedata version')
# This should be moved elsewhere, maybe as an actual query. Currently, without
# a try-except, it breaks when making a new game db because we haven't reached
# gamedata_meta.create_all()
try:
    config.gamedata_version = gamedata_session.execute(
        "SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'client_build'"
    ).fetchone()[0]
    config.gamedata_date = gamedata_session.execute(
        "SELECT `field_value` FROM `metadata` WHERE `field_name` LIKE 'dump_time'"
    ).fetchone()[0]
except (KeyboardInterrupt, SystemExit):
    raise
except Exception:
    # Tolerate a missing metadata table when making a new game db (see the
    # note above); leave the gamedata version fields unset in that case.
    pass
Example #45
0
def upgrade(migrate_engine):
    """This database upgrade replaces the i_istorconfig table with five
       tables: controller_fs, storage_backend, storage_ceph, storage_lvm,
       ceph_mon.
    """

    meta = MetaData()
    meta.bind = migrate_engine
    conn = migrate_engine.connect()

    i_host = Table('i_host', meta, autoload=True)
    i_system = Table('i_system', meta, autoload=True)

    # Define and create the controller_fs table.
    controller_fs = Table(
        'controller_fs',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('database_gib', Integer),
        Column('cgcs_gib', Integer),
        Column('img_conversions_gib', Integer),
        Column('backup_gib', Integer),
        Column('forisystemid', Integer,
               ForeignKey(i_system.c.id, ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )

    controller_fs.create()

    # Define and create the storage_backend table.
    storage_backend = Table(
        'storage_backend',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('backend', String(255)),
        Column('state', String(255)),
        Column('task', String(255)),
        Column('forisystemid', Integer,
               ForeignKey(i_system.c.id, ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )

    storage_backend.create()

    # Define and create the storage_lvm table.
    storage_lvm = Table(
        'storage_lvm',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('storage_backend.id', ondelete="CASCADE"),
               primary_key=True,
               unique=True,
               nullable=False),
        Column('cinder_device', String(255)),
        Column('cinder_gib', Integer),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )

    storage_lvm.create()

    # Define and create the storage_ceph table.
    storage_ceph = Table(
        'storage_ceph',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id',
               Integer,
               ForeignKey('storage_backend.id', ondelete="CASCADE"),
               primary_key=True,
               unique=True,
               nullable=False),
        Column('cinder_pool_gib', Integer),
        Column('glance_pool_gib', Integer),
        Column('ephemeral_pool_gib', Integer),
        Column('object_pool_gib', Integer),
        Column('object_gateway', Boolean, default=False),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )

    storage_ceph.create()

    # Define and create the ceph_mon table.
    ceph_mon = Table(
        'ceph_mon',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('device_node', String(255)),
        Column('ceph_mon_gib', Integer),
        Column('forihostid', Integer,
               ForeignKey(i_host.c.id, ondelete='CASCADE')),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )

    ceph_mon.create()

    # Move the data from the i_storconfig table to the new tables.
    i_storconfig = Table('i_storconfig', meta, autoload=True)

    # Obtain the i_storconfig entries.
    storcfg_items = list(i_storconfig.select().execute())

    # If there are two entries in the i_storconfig table, then it means that
    # Ceph backend was added over LVM.
    lvm_and_ceph = False
    if len(storcfg_items) > 1:
        lvm_and_ceph = True

    if storcfg_items:
        for storcfg in storcfg_items:

            # Populate the storage_backend table.
            storage_backend_insert = storage_backend.insert()  # pylint: disable=no-value-for-parameter
            storage_backend_uuid = str(uuid.uuid4())

            values = {
                'created_at': datetime.now(),
                'updated_at': None,
                'deleted_at': None,
                'uuid': storage_backend_uuid,
                'backend': storcfg['cinder_backend'],
                'state': storcfg['state'],
                'task': storcfg['task'],
                'forisystemid': storcfg['forisystemid'],
            }
            storage_backend_insert.execute(values)

            # Get the id of the new storage_backend entry.
            new_stor_id_sel = select([
                storage_backend
            ]).where(storage_backend.c.uuid == storage_backend_uuid)
            new_stor_id = conn.execute(new_stor_id_sel).fetchone()['id']

            # Populate the storage_lvm table.
            if storcfg['cinder_backend'] == 'lvm':
                storage_lvm_insert = storage_lvm.insert()  # pylint: disable=no-value-for-parameter

                values = {
                    'created_at': datetime.now(),
                    'updated_at': None,
                    'deleted_at': None,
                    'id': new_stor_id,
                    'cinder_device': storcfg['cinder_device'],
                    'cinder_gib': storcfg['cinder_gib'],
                }
                storage_lvm_insert.execute(values)

            # Populate the storage_ceph table.
            # Do this only if the backend of the current item is ceph.
            if storcfg['cinder_backend'] == 'ceph':
                if (storcfg['cinder_pool_gib'] or storcfg['glance_pool_gib']
                        or storcfg['ephemeral_pool_gib']):

                    storage_ceph_insert = storage_ceph.insert()  # pylint: disable=no-value-for-parameter
                    values = {
                        'created_at': datetime.now(),
                        'updated_at': None,
                        'deleted_at': None,
                        'id': new_stor_id,
                        'cinder_pool_gib': storcfg['cinder_pool_gib'],
                        'glance_pool_gib': storcfg['glance_pool_gib'],
                        'ephemeral_pool_gib': storcfg['ephemeral_pool_gib'],
                        'object_pool_gib': 0,
                        'object_gateway': False,
                    }
                    storage_ceph_insert.execute(values)

            # Populate the controller_fs table.
            # If Ceph was added over LVM, we need to take the data for
            # controller_fs from the LVM i_storconfig entry.
            fill_storage = True
            if lvm_and_ceph and storcfg['cinder_backend'] == 'ceph':
                fill_storage = False

            if fill_storage:
                controller_fs_insert = controller_fs.insert()  # pylint: disable=no-value-for-parameter
                controller_fs_uuid = str(uuid.uuid4())

                values = {
                    'created_at': datetime.now(),
                    'updated_at': None,
                    'deleted_at': None,
                    'uuid': controller_fs_uuid,
                    'database_gib': storcfg['database_gib'],
                    'cgcs_gib': storcfg['image_gib'],
                    'img_conversions_gib': storcfg['img_conversions_gib'],
                    'backup_gib': storcfg['backup_gib'],
                    'forisystemid': storcfg['forisystemid'],
                }
                controller_fs_insert.execute(values)

            # Populate the ceph_mon table.
            if storcfg['cinder_backend'] == 'ceph':
                if (storcfg['ceph_mon_dev_ctrl0']
                        and storcfg['ceph_mon_dev_ctrl1']
                        and storcfg['ceph_mon_gib']):
                    ceph_mon_insert_ctrl0 = ceph_mon.insert()  # pylint: disable=no-value-for-parameter
                    ceph_mon_insert_ctrl1 = ceph_mon.insert()  # pylint: disable=no-value-for-parameter

                    ctrl0_id_sel = select(
                        [i_host]).where(i_host.c.hostname == 'controller-0')
                    ctrl0_id = conn.execute(ctrl0_id_sel).fetchone()['id']
                    ctrl1_id_sel = select(
                        [i_host]).where(i_host.c.hostname == 'controller-1')
                    ctrl1_id = conn.execute(ctrl1_id_sel).fetchone()['id']

                    values0 = {
                        'created_at': datetime.now(),
                        'updated_at': None,
                        'deleted_at': None,
                        'uuid': str(uuid.uuid4()),
                        'device_node': storcfg['ceph_mon_dev_ctrl0'],
                        'ceph_mon_gib': storcfg['ceph_mon_gib'],
                        'forihostid': ctrl0_id,
                    }

                    values1 = {
                        'created_at': datetime.now(),
                        'updated_at': None,
                        'deleted_at': None,
                        'uuid': str(uuid.uuid4()),
                        'device_node': storcfg['ceph_mon_dev_ctrl1'],
                        'ceph_mon_gib': storcfg['ceph_mon_gib'],
                        'forihostid': ctrl1_id,
                    }

                    ceph_mon_insert_ctrl0.execute(values0)
                    ceph_mon_insert_ctrl1.execute(values1)

    # Delete the i_storconfig table.
    i_storconfig.drop()
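    # Optional sanity check (a sketch, not part of the original migration):
    # confirm each new table received rows now that the old one is gone.
    for t in (controller_fs, storage_backend, storage_lvm, storage_ceph,
              ceph_mon):
        migrated = conn.execute(t.select()).fetchall()
        print('%s: %d row(s) migrated' % (t.name, len(migrated)))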
Example #46
0
Base_automap.prepare(db_engine, reflect=True)
Session = sessionmaker(bind=db_engine)
db_session = Session()

aggregations = []
for aggregation in config.aggregations:
    for bin in (b for b in aggregation.bins if hasattr(b, 'geometry_column')):
        aggregations.append({
            'table': bin.table,
            'geometry_column': bin.geometry_column,
            'key': bin.key,
            'orm': getattr(Base_automap.classes, bin.table)
        })

metadata = MetaData()
metadata.bind = db_engine
extra_data = Table('extra_data', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('timestamp', DateTime), Column('verified', Boolean),
                   Column('bigquery_key', String),
                   Column('bigquery_test_id', String),
                   Column('location', Geometry("Point", srid=4326)),
                   Column('isp', String), Column('connection_type', String),
                   Column('advertised_download', Integer),
                   Column('actual_download', Float),
                   Column('advertised_upload', Integer),
                   Column('actual_upload', Float), Column('min_rtt', Integer),
                   Column('client_ip', BigInteger), Column('isp_user', String),
                   Column('cost_of_service', Integer))
metadata.create_all()
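# Hedged usage sketch for the table created above; the column values are
# made up, and db_engine is the engine the metadata is bound to.
import datetime

db_engine.execute(extra_data.insert().values(
    timestamp=datetime.datetime.utcnow(),
    verified=False,
    isp='example-isp',
    actual_download=42.0,
))
rows = db_engine.execute(extra_data.select()).fetchall()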
Example #47
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # New table
    quota_classes = Table(
        'quota_classes',
        meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True),
        Column('class_name', String(length=255), index=True),
        Column('resource', String(length=255)),
        Column('hard_limit', Integer(), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    try:
        quota_classes.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(quota_classes))
        raise

    quota_usages = Table(
        'quota_usages',
        meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True),
        Column('project_id', String(length=255), index=True),
        Column('resource', String(length=255)),
        Column('in_use', Integer(), nullable=False),
        Column('reserved', Integer(), nullable=False),
        Column('until_refresh', Integer(), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    try:
        quota_usages.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(quota_usages))
        raise

    reservations = Table(
        'reservations',
        meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True),
        Column('uuid', String(length=36), nullable=False),
        Column('usage_id',
               Integer(),
               ForeignKey('quota_usages.id'),
               nullable=False),
        Column('project_id', String(length=255), index=True),
        Column('resource', String(length=255)),
        Column('delta', Integer(), nullable=False),
        Column('expire', DateTime(timezone=False)),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    try:
        reservations.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(reservations))
        raise
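# NOTE: a hedged refactor sketch, not part of the original migration: the
# three identical try/except blocks above could share one helper. Creation
# order is preserved (reservations depends on quota_usages via its FK).
def _create_or_log(table):
    try:
        table.create()
    except Exception:
        LOG.error(_("Table |%s| not created!"), repr(table))
        raise

for table in (quota_classes, quota_usages, reservations):
    _create_or_log(table)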
Example #48
0
    def restore(self):
        """Generator function that yields tuples :
        (numer_of_steps_completed, total_number_of_steps, description_of_current_step)
        while performing a restore.
        """
        #
        # The restored database may contain different AuthenticationMechanisms
        #
        from camelot.model.authentication import clear_current_authentication
        clear_current_authentication()
        #
        # Proceed with the restore
        #
        import os
        from camelot.core.files.storage import StoredFile
        import settings
        from sqlalchemy import create_engine
        from sqlalchemy import MetaData
        from sqlalchemy.pool import NullPool

        yield (0, 0, _('Open backup file'))
        if self._storage:
            if not self._storage.exists(self._filename):
                raise Exception('Backup file does not exist')
            stored_file = StoredFile(self._storage, self._filename)
            filename = self._storage.checkout( stored_file )
        else:
            if not os.path.exists(self._filename):
                raise Exception('Backup file does not exist')
            filename = self._filename
        from_engine = create_engine('sqlite:///%s' % filename, poolclass=NullPool)

        yield (0, 0, _('Prepare database for restore'))
        to_engine = settings.ENGINE()
        self.prepare_schema_for_restore(from_engine, to_engine)
          
        yield (0, 0, _('Analyzing backup structure'))
        from_meta_data = MetaData()
        from_meta_data.bind = from_engine
        from_meta_data.reflect()
        
        yield (0, 0, _('Analyzing database structure'))     
        to_meta_data = MetaData()
        to_meta_data.bind = to_engine
        to_meta_data.reflect()
        to_tables = list(table for table in to_meta_data.sorted_tables if self.restore_table_filter(table))
        number_of_tables = len(to_tables)
        steps = number_of_tables * 2 + 2
        
        for i,to_table in enumerate(reversed(to_tables)):
            yield (i, steps, _('Delete data from table %s')%to_table.name)
            self.delete_table_data(to_table)
        
        for i,to_table in enumerate(to_tables):
            if to_table.name in from_meta_data.tables:
                yield (number_of_tables+i, steps, _('Copy data from table %s')%to_table.name)
                self.copy_table_data(from_meta_data.tables[to_table.name], to_table)
                
        yield (number_of_tables * 2 + 1, steps, _('Update schema after restore'))
        self.update_schema_after_restore(from_engine, to_engine)
        
        yield (number_of_tables * 2 + 2, steps, _('Expire current session'))
        from sqlalchemy.orm.session import _sessions
        for session in _sessions.values():
            session.expire_all()
        
        yield (1, 1, _('Restore completed'))
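# A minimal sketch of consuming the progress generator above; 'backup' is
# assumed to be an instance of the class defining restore().
def run_restore(backup):
    for done, total, description in backup.restore():
        if total:
            print('[%d/%d] %s' % (done, total, description))
        else:
            print(description)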
def upgrade(migrate_engine):
    """Convert volume_type_id to UUID."""
    meta = MetaData()
    meta.bind = migrate_engine

    volumes = Table('volumes', meta, autoload=True)
    volume_types = Table('volume_types', meta, autoload=True)
    extra_specs = Table('volume_type_extra_specs', meta, autoload=True)

    fkey_remove_list = [volumes.c.volume_type_id,
                        volume_types.c.id,
                        extra_specs.c.volume_type_id]

    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)

            try:
                fkey.drop()
            except Exception:
                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
                    pass
                else:
                    raise

    volumes.c.volume_type_id.alter(String(36))
    volume_types.c.id.alter(String(36))
    extra_specs.c.volume_type_id.alter(String(36))

    vtype_list = list(volume_types.select().execute())
    for t in vtype_list:
        new_id = str(uuid.uuid4())

        volumes.update().\
            where(volumes.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        extra_specs.update().\
            where(extra_specs.c.volume_type_id == t['id']).\
            values(volume_type_id=new_id).execute()

        volume_types.update().\
            where(volume_types.c.id == t['id']).\
            values(id=new_id).execute()

    for column in fkey_remove_list:
        fkeys = list(column.foreign_keys)
        if fkeys:
            fkey_name = fkeys[0].constraint.name
            fkey = ForeignKeyConstraint(columns=[column],
                                        refcolumns=[volume_types.c.id],
                                        name=fkey_name)
            try:
                fkey.create()
            except Exception:
                if migrate_engine.url.get_dialect().name.startswith('sqlite'):
                    pass
                else:
                    raise
Example #50
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    for table_name, indexes in INDEXES_TO_CREATE.items():
        for index_name, columns in indexes.items():
            ensure_indexed(migrate_engine, table_name, index_name, columns)
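# NOTE: ensure_indexed is not defined in this snippet; a plausible
# implementation (hypothetical, assuming sqlalchemy's MetaData, Table and
# Index are imported) reflects the table and creates the index only when
# it is missing:
def ensure_indexed(migrate_engine, table_name, index_name, columns):
    meta = MetaData()
    meta.bind = migrate_engine
    table = Table(table_name, meta, autoload=True)
    if index_name not in (idx.name for idx in table.indexes):
        Index(index_name, *(table.c[name] for name in columns)).create()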
Example #51
0
        # in case there is a session with this name
        else:
            self._verify_user_in_session(self.session)


if __name__ == '__main__':

    display_message('AMS', 'creating tables in database...')
    db.create_all()
    display_message('AMS', 'tables created in database.')

    ENGINE = create_engine('sqlite:///' + os.path.join(basedir, 'data.sqlite'))
    TWISTED_ENGINE = wrap_engine(reactor, ENGINE)
    TWISTED_ENGINE.run_callable = ENGINE.run_callable

    METADATA = MetaData()
    METADATA.bind = ENGINE
    AGENTS = Table('agents', METADATA, autoload=True, autoload_with=ENGINE)

    ams = AMS(host=sys.argv[4], port=int(sys.argv[5]))
    # instantiates AMS agent and calls listenTCP method
    # from Twisted to launch the agent
    ams.register_user(username=sys.argv[1],
                      email=sys.argv[2],
                      password=sys.argv[3])
    ams._initialize_database()
    reactor.callLater(0.1, display_message,
                      'ams@{}:{}'.format(ams.ams['name'], ams.ams['port']),
                      'PADE AMS service running right now....')
    reactor.run()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    bm_nodes = Table(
        'bm_nodes',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('cpus', Integer),
        Column('memory_mb', Integer),
        Column('local_gb', Integer),
        Column('pm_address', String(length=255)),
        Column('pm_user', String(length=255)),
        Column('pm_password', String(length=255)),
        Column('service_host', String(length=255)),
        Column('prov_mac_address', String(length=255)),
        Column('instance_uuid', String(length=36)),
        Column('registration_status', String(length=16)),
        Column('task_state', String(length=255)),
        Column('prov_vlan_id', Integer),
        Column('terminal_port', Integer),
        mysql_engine='InnoDB',
        #mysql_charset='utf8'
    )

    bm_interfaces = Table(
        'bm_interfaces',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('bm_node_id', Integer),
        Column('address', String(length=255)),
        Column('datapath_id', String(length=255)),
        Column('port_no', Integer),
        Column('vif_uuid', String(length=36)),
        mysql_engine='InnoDB',
        #mysql_charset='utf8'
    )

    bm_pxe_ips = Table(
        'bm_pxe_ips',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('address', String(length=255)),
        Column('bm_node_id', Integer),
        Column('server_address', String(length=255)),
        mysql_engine='InnoDB',
        #mysql_charset='utf8'
    )

    bm_deployments = Table(
        'bm_deployments',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('bm_node_id', Integer),
        Column('key', String(length=255)),
        Column('image_path', String(length=255)),
        Column('pxe_config_path', String(length=255)),
        Column('root_mb', Integer),
        Column('swap_mb', Integer),
        mysql_engine='InnoDB',
        #mysql_charset='utf8'
    )

    bm_nodes.create()
    bm_interfaces.create()
    bm_pxe_ips.create()
    bm_deployments.create()
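# NOTE: a hypothetical matching downgrade, not in the original snippet; it
# drops the four tables in reverse creation order.
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    for name in ('bm_deployments', 'bm_pxe_ips', 'bm_interfaces',
                 'bm_nodes'):
        Table(name, meta, autoload=True).drop()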
Example #53
0
 def backup(self):
     """Generator function that yields tuples :
     (numer_of_steps_completed, total_number_of_steps, description_of_current_step)
     while performing a backup.
     """
     import os
     import tempfile
     import shutil
     import settings
     from sqlalchemy import create_engine
     from sqlalchemy import MetaData
     from sqlalchemy.pool import NullPool
     from sqlalchemy.dialects import mysql as mysql_dialect
     import sqlalchemy.types
     
     yield (0, 0, _('Analyzing database structure'))
     from_engine = settings.ENGINE()
     from_meta_data = MetaData()
     from_meta_data.bind = from_engine
     from_meta_data.reflect()
             
     yield (0, 0, _('Preparing backup file'))
     #
     # We'll first store the backup in a temporary file, since
     # the selected file might be on a server or in a storage
     #
     file_descriptor, temp_file_name = tempfile.mkstemp(suffix='.db')
     os.close(file_descriptor)
     logger.info("preparing backup to '%s'"%temp_file_name)
     if os.path.exists(self._filename):
         os.remove(self._filename)
     to_engine   = create_engine( u'sqlite:///%s'%temp_file_name, poolclass=NullPool )
     to_meta_data = MetaData()
     to_meta_data.bind = to_engine
     #
     # Only copy tables, to prevent issues with indices and constraints
     #
     from_and_to_tables = []
     for from_table in from_meta_data.sorted_tables:
         if self.backup_table_filter(from_table):
             to_table = from_table.tometadata(to_meta_data)
             #
             # Dirty hack : loop over all columns to detect mysql TINYINT
             # columns and convert them to BOOL
             #
             for col in to_table.columns:
                 if isinstance(col.type, mysql_dialect.TINYINT):
                     col.type = sqlalchemy.types.Boolean()
             #
             # End of dirty hack
             #
             to_table.create(to_engine)
             from_and_to_tables.append((from_table, to_table))
     
     number_of_tables = len(from_and_to_tables)
     for i,(from_table, to_table) in enumerate(from_and_to_tables):
         yield (i, number_of_tables + 1, _('Copy data of table %s')%from_table.name)
         self.copy_table_data(from_table, to_table)
     yield (number_of_tables, number_of_tables + 1, _('Store backup at requested location') )
     if not self._storage:
         shutil.move(temp_file_name, self._filename)
     else:
         self._storage.checkin( temp_file_name, self._filename )
         os.remove( temp_file_name )
     yield (number_of_tables + 1, number_of_tables + 1, _('Backup completed'))
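# NOTE: a hedged generalization of the TINYINT workaround above, not part
# of the original code; it swaps dialect-specific column types on a copied
# table before create() is called.
def _portable_types(table, conversions):
    """Replace dialect-specific column types with portable ones."""
    for col in table.columns:
        for src_type, dst_type in conversions:
            if isinstance(col.type, src_type):
                col.type = dst_type()

# e.g. _portable_types(to_table,
#                      [(mysql_dialect.TINYINT, sqlalchemy.types.Boolean)])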
Example #54
0
            self.name = name

        def __repr__(self):
            return '<%s %r>' % (self.__class__.__name__, self.name)

    mapper(Animal,
           animals,
           properties={
               'facts':
               relation(AnimalFact,
                        backref='animal',
                        collection_class=attribute_mapped_collection('key')),
           })
    mapper(AnimalFact, facts)

    metadata.bind = 'sqlite:///'
    metadata.create_all()
    session = create_session()

    stoat = Animal(u'stoat')
    stoat[u'color'] = u'reddish'
    stoat[u'cuteness'] = u'somewhat'

    # dict-like assignment transparently creates entries in the
    # stoat.facts collection:
    print(stoat.facts[u'color'])

    session.add(stoat)
    session.flush()
    session.expunge_all()
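# NOTE: this snippet opens mid-class; in SQLAlchemy's classic "dictlike"
# vertical-attribute example the elided dict-like access looks roughly
# like this (a sketch, assuming an AnimalFact(key, value) constructor):
class Animal(object):
    def __init__(self, name):
        self.name = name

    def __getitem__(self, key):
        # Reads delegate to the mapped 'facts' collection.
        return self.facts[key].value

    def __setitem__(self, key, value):
        # Assignment creates or updates an AnimalFact entry.
        if key in self.facts:
            self.facts[key].value = value
        else:
            self.facts[key] = AnimalFact(key, value)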
Example #55
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    if migrate_engine.name == 'mysql':
        nameargs = {'collation': 'utf8_bin'}
    else:
        nameargs = {}
    resource_providers = Table(
        'resource_providers', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), nullable=False),
        Column('name', Unicode(200, **nameargs), nullable=True),
        Column('generation', Integer, default=0),
        Column('can_host', Integer, default=0),
        UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
        UniqueConstraint('name', name='uniq_resource_providers0name'),
        Index('resource_providers_name_idx', 'name'),
        Index('resource_providers_uuid_idx', 'uuid'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )

    inventories = Table(
        'inventories', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('resource_provider_id', Integer, nullable=False),
        Column('resource_class_id', Integer, nullable=False),
        Column('total', Integer, nullable=False),
        Column('reserved', Integer, nullable=False),
        Column('min_unit', Integer, nullable=False),
        Column('max_unit', Integer, nullable=False),
        Column('step_size', Integer, nullable=False),
        Column('allocation_ratio', Float, nullable=False),
        Index('inventories_resource_provider_id_idx',
              'resource_provider_id'),
        Index('inventories_resource_provider_resource_class_idx',
              'resource_provider_id', 'resource_class_id'),
        Index('inventories_resource_class_id_idx',
              'resource_class_id'),
        UniqueConstraint('resource_provider_id', 'resource_class_id',
            name='uniq_inventories0resource_provider_resource_class'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )

    allocations = Table(
        'allocations', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('resource_provider_id', Integer, nullable=False),
        Column('consumer_id', String(36), nullable=False),
        Column('resource_class_id', Integer, nullable=False),
        Column('used', Integer, nullable=False),
        Index('allocations_resource_provider_class_used_idx',
              'resource_provider_id', 'resource_class_id',
              'used'),
        Index('allocations_resource_class_id_idx',
              'resource_class_id'),
        Index('allocations_consumer_id_idx', 'consumer_id'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )

    resource_provider_aggregates = Table(
        'resource_provider_aggregates', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('resource_provider_id', Integer, primary_key=True,
               nullable=False),
        Column('aggregate_id', Integer, primary_key=True, nullable=False),
        Index('resource_provider_aggregates_aggregate_id_idx',
              'aggregate_id'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )

    for table in [resource_providers, inventories, allocations,
                  resource_provider_aggregates]:
        table.create(checkfirst=True)
Example #56
0
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    Table('ports', meta, autoload=True, autoload_with=migrate_engine)
    Table('i_host', meta, autoload=True, autoload_with=migrate_engine)

    lldp_agents = Table(
        'lldp_agents',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('host_id', Integer, ForeignKey('i_host.id',
                                              ondelete='CASCADE')),
        Column('port_id', Integer, ForeignKey('ports.id', ondelete='CASCADE')),
        Column('status', String(255)),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    lldp_agents.create()

    lldp_neighbours = Table(
        'lldp_neighbours',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), unique=True),
        Column('host_id', Integer, ForeignKey('i_host.id',
                                              ondelete='CASCADE')),
        Column('port_id', Integer, ForeignKey('ports.id', ondelete='CASCADE')),
        Column('msap', String(511), nullable=False),
        UniqueConstraint('msap', 'port_id', name='u_msap_port_id'),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    lldp_neighbours.create()

    lldp_tlvs = Table(
        'lldp_tlvs',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('agent_id',
               Integer,
               ForeignKey('lldp_agents.id', ondelete="CASCADE"),
               nullable=True),
        Column('neighbour_id',
               Integer,
               ForeignKey('lldp_neighbours.id', ondelete="CASCADE"),
               nullable=True),
        Column('type', String(255)),
        Column('value', String(255)),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    lldp_tlvs.create()
Example #57
0
def get_table(table_name):
    u"""通过表名在数据库中获得表对象"""
    meta = MetaData()
    meta.bind = main_db_engine
    table = Table(table_name, meta, autoload=True)
    return table
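# Illustrative usage; the table name 'users' is made up.
users = get_table('users')
print(users.columns.keys())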
Example #58
0
def geolocation_buses(network, session):
    """
    Use Geometries of buses x/y (lon/lat) and Polygons
    of Countries from RenpassGisParameterRegion
    in order to locate the buses

    Parameters
    ----------
    network : Network
        eTraGo Network
    session : sqlalchemy
        session to oedb

    ToDo
    ----
     - check eTrago stack generation plots and other in order of adaptation

    """
    # Start db connection
    # get renpassG!S scenario data

    meta = MetaData()
    meta.bind = session.bind
    conn = meta.bind
    # get db table
    meta.reflect(bind=conn, schema='model_draft',
                 only=['renpass_gis_parameter_region'])

    # map to classes
    Base = automap_base(metadata=meta)
    Base.prepare()
    RenpassGISRegion = \
        Base.classes.renpass_gis_parameter_region

    # Define regions
    region_id = ['DE', 'DK', 'FR', 'BE', 'LU',
                 'NO', 'PL', 'CH', 'CZ', 'SE', 'NL']

    query = session.query(RenpassGISRegion.gid, RenpassGISRegion.u_region_id,
                          RenpassGISRegion.stat_level, RenpassGISRegion.geom,
                          RenpassGISRegion.geom_point)

    # get regions by query and filter
    Regions = [(gid, u_region_id, stat_level, shape.to_shape(geom),
                shape.to_shape(geom_point))
               for gid, u_region_id, stat_level, geom, geom_point in
               query.filter(RenpassGISRegion.u_region_id.in_(region_id)).all()]

    crs = {'init': 'epsg:4326'}
    # transform lon lat to shapely Points and create GeoDataFrame
    points = [Point(xy) for xy in zip(network.buses.x, network.buses.y)]
    bus = gpd.GeoDataFrame(network.buses, crs=crs, geometry=points)
    # Transform Countries Polygons as Regions
    region = pd.DataFrame(Regions, columns=['id','country','stat_level','Polygon','Point'])
    re = gpd.GeoDataFrame(region, crs=crs, geometry=region['Polygon'])
    # join regions and buses by geometry which intersects
    busC = gpd.sjoin(bus, re, how='inner', op='intersects')
    # Drop unused columns
    busC = busC.drop(['index_right', 'Point', 'id', 'Polygon', 'stat_level',
                      'geometry'], axis=1)
    # add busC to eTraGo.buses
    network.buses['country_code'] = busC['country']

    # close session
    session.close()

    return network
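# Hedged usage sketch; network (an eTraGo/PyPSA Network) and session come
# from setup elsewhere, so the names below are assumptions.
network = geolocation_buses(network, session)
print(network.buses['country_code'].head())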
Example #59
0
def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    image_sync = Table('image_sync', meta, autoload=True)
    image_sync.drop()
def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    build_requests = Table('build_requests', meta, autoload=True)
    build_requests.c.block_device_mappings.alter(type=api_models.MediumText())