def upgrade(migrate_engine):
    """Add mtu, dhcp_server, enable_dhcp and share_address to networks."""
    meta = MetaData(bind=migrate_engine)
    networks = Table('networks', meta, autoload=True)
    shadow_networks = Table('shadow_networks', meta, autoload=True)

    # NOTE(vish): ignore duplicate runs of upgrade so this can
    #             be backported
    new_columns = (
        Column('mtu', Integer),
        Column('dhcp_server', types.IPAddress),
        Column('enable_dhcp', Boolean, default=True),
        Column('share_address', Boolean, default=False),
    )
    for column in new_columns:
        if not hasattr(networks.c, column.name):
            networks.create_column(column)
    for column in new_columns:
        if not hasattr(shadow_networks.c, column.name):
            shadow_networks.create_column(column.copy())
def upgrade(migrate_engine):
    """Add migration_type and hidden columns to the migrations tables."""
    meta = MetaData(bind=migrate_engine)
    migrations = Table('migrations', meta, autoload=True)
    shadow_migrations = Table('shadow_migrations', meta, autoload=True)

    # The enum type has to exist in the database before any column
    # can reference it.
    migration_type_enum = Enum('migration', 'resize', 'live-migration',
                               'evacuation', metadata=meta,
                               name='migration_type')
    migration_type_enum.create()

    migration_type = Column('migration_type', migration_type_enum,
                            nullable=True)
    hidden = Column('hidden', Boolean, default=False)

    # hasattr() guards keep the migration idempotent on re-runs.
    if not hasattr(migrations.c, 'migration_type'):
        migrations.create_column(migration_type)
    if not hasattr(shadow_migrations.c, 'migration_type'):
        shadow_migrations.create_column(migration_type.copy())
    if not hasattr(migrations.c, 'hidden'):
        migrations.create_column(hidden)
    if not hasattr(shadow_migrations.c, 'hidden'):
        shadow_migrations.create_column(hidden.copy())
def upgrade(migrate_engine):
    """Add host and details columns to the instance_actions_events tables."""
    shadow_name = api._SHADOW_TABLE_PREFIX + 'instance_actions_events'

    host_col = Column('host', String(255))
    details_col = Column('details', Text)

    events = utils.get_table(migrate_engine, 'instance_actions_events')
    events.create_column(host_col)
    events.create_column(details_col)

    shadow_events = utils.get_table(migrate_engine, shadow_name)
    shadow_events.create_column(host_col.copy())
    shadow_events.create_column(details_col.copy())
def upgrade(migrate_engine):
    """Add host and details columns to the instance action events tables."""
    # Apply the same pair of columns to the live table and its shadow copy.
    for table_name in ('instance_actions_events',
                       api._SHADOW_TABLE_PREFIX + 'instance_actions_events'):
        table = utils.get_table(migrate_engine, table_name)
        table.create_column(Column('host', String(255)))
        table.create_column(Column('details', Text))
def upgrade(migrate_engine):
    """Add a nullable uuid column to pci_devices and shadow_pci_devices.

    The hasattr() checks keep the migration idempotent so duplicate
    runs are harmless.
    """
    # The original created a MetaData bound to the engine but never used
    # it (tables are reflected via utils.get_table); the dead local has
    # been removed.
    uuid_col = Column('uuid', String(36))

    pci_devices = utils.get_table(migrate_engine, 'pci_devices')
    if not hasattr(pci_devices.c, 'uuid'):
        pci_devices.create_column(uuid_col.copy())

    shadow_pci_devices = utils.get_table(
        migrate_engine, api._SHADOW_TABLE_PREFIX + 'pci_devices')
    if not hasattr(shadow_pci_devices.c, 'uuid'):
        shadow_pci_devices.create_column(uuid_col.copy())
def create_pk_cols(self):
    """Create primary_key columns.

    That is, call the 'create_pk_cols' builders then add a primary key
    to the table if it hasn't already got one and needs one.

    This method is "semi-recursive" in some cases: it calls the
    create_keys method on ManyToOne relationships and those in turn call
    create_pk_cols on their target. It shouldn't be possible to have an
    infinite loop since a loop of primary_keys is not a valid situation.
    """
    # Run at most once per descriptor.
    if self._pk_col_done:
        return

    self.call_builders('create_pk_cols')

    if not self.autoload:
        if self.parent:
            if self.inheritance == 'multi':
                # Add columns with foreign keys to the parent's primary
                # key columns.
                # NOTE: the original also computed `tablename` and a
                # "<table>.<key>" string per pk column, but never used
                # them; that dead code has been removed.
                parent_desc = self.parent._descriptor
                join_clauses = []
                fk_columns = []
                for pk_col in parent_desc.primary_keys:
                    # Child FK column name derives from the parent entity
                    # name and the parent pk column key.
                    colname = options.MULTIINHERITANCECOL_NAMEFORMAT % \
                        {'entity': self.parent.__name__.lower(),
                         'key': pk_col.key}
                    col = Column(colname, pk_col.type, primary_key=True)
                    fk_columns.append(col)
                    self.add_column(col)
                    join_clauses.append(col == pk_col)
                self.join_condition = and_(*join_clauses)
                # One composite FK constraint covering every parent pk
                # column; deleting the parent row cascades here.
                self.add_constraint(
                    ForeignKeyConstraint(fk_columns,
                                         parent_desc.primary_keys,
                                         ondelete='CASCADE'))
            elif self.inheritance == 'concrete':
                # Copy primary key columns from the parent.
                for col in self.parent._descriptor.columns:
                    if col.primary_key:
                        self.add_column(col.copy())
        elif not self.has_pk and self.auto_primarykey:
            # auto_primarykey may be a custom column name (a string) or
            # a truthy flag meaning "use the default name".
            if isinstance(self.auto_primarykey, basestring):
                colname = self.auto_primarykey
            else:
                colname = options.DEFAULT_AUTO_PRIMARYKEY_NAME
            self.add_column(
                Column(colname, options.DEFAULT_AUTO_PRIMARYKEY_TYPE,
                       primary_key=True))

    self._pk_col_done = True
def create_pk_cols(self):
    """Create primary_key columns.

    That is, call the 'create_pk_cols' builders then add a primary key
    to the table if it hasn't already got one and needs one.

    This method is "semi-recursive" in some cases: it calls the
    create_keys method on ManyToOne relationships and those in turn call
    create_pk_cols on their target. It shouldn't be possible to have an
    infinite loop since a loop of primary_keys is not a valid situation.
    """
    # Run at most once per descriptor.
    if self._pk_col_done:
        return

    self.call_builders('create_pk_cols')

    if not self.autoload:
        if self.parent:
            if self.inheritance == 'multi':
                # Add columns with foreign keys to the parent's primary
                # key columns
                parent_desc = self.parent._descriptor
                join_clauses = []
                fk_columns = []
                for pk_col in parent_desc.primary_keys:
                    # Child FK column name derives from the parent entity
                    # name and the parent pk column key.
                    colname = options.MULTIINHERITANCECOL_NAMEFORMAT % \
                        {'entity': self.parent.__name__.lower(),
                         'key': pk_col.key}

                    # It seems like SA ForeignKey is not happy being given
                    # a real column object when said column is not yet
                    # attached to a table
                    col = Column(colname, pk_col.type, primary_key=True)
                    fk_columns.append(col)
                    self.add_column(col)
                    join_clauses.append(col == pk_col)
                self.join_condition = and_(*join_clauses)
                # One composite FK constraint covering every parent pk
                # column; deleting the parent row cascades to this table.
                self.add_constraint(
                    ForeignKeyConstraint(
                        fk_columns, parent_desc.primary_keys,
                        ondelete='CASCADE'))
            elif self.inheritance == 'concrete':
                # Copy primary key columns from the parent.
                for col in self.parent._descriptor.columns:
                    if col.primary_key:
                        self.add_column(col.copy())
        elif not self.has_pk and self.auto_primarykey:
            # auto_primarykey may be a custom column name (a string) or a
            # truthy flag meaning "use the default name".
            if isinstance(self.auto_primarykey, string_types):
                colname = self.auto_primarykey
            else:
                colname = options.DEFAULT_AUTO_PRIMARYKEY_NAME
            self.add_column(
                Column(colname, options.DEFAULT_AUTO_PRIMARYKEY_TYPE,
                       primary_key=True))

    self._pk_col_done = True
def upgrade(migrate_engine):
    """Add a disabled_reason column to services and shadow_services."""
    meta = MetaData()
    meta.bind = migrate_engine

    services = Table('services', meta, autoload=True)
    services.create_column(Column('disabled_reason', String(255)))

    shadow_services = Table('shadow_services', meta, autoload=True)
    shadow_services.create_column(Column('disabled_reason', String(255)))
def upgrade(migrate_engine):
    """Add min_vcpus and max_vcpus columns to the instances tables."""
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    min_vcpus = Column('min_vcpus', Integer())
    max_vcpus = Column('max_vcpus', Integer())

    # Skip columns that already exist so re-runs are harmless.
    for col in (min_vcpus, max_vcpus):
        if not hasattr(instances.c, col.name):
            instances.create_column(col)
    for col in (min_vcpus, max_vcpus):
        if not hasattr(shadow_instances.c, col.name):
            shadow_instances.create_column(col.copy())
def upgrade(migrate_engine):
    """Add l3_closids and l3_closids_used to the compute_nodes tables."""
    meta = MetaData()
    meta.bind = migrate_engine

    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    # Same treatment for both counters; guards keep the upgrade idempotent.
    for name in ('l3_closids', 'l3_closids_used'):
        col = Column(name, Integer())
        if not hasattr(compute_nodes.c, name):
            compute_nodes.create_column(col)
        if not hasattr(shadow_compute_nodes.c, name):
            shadow_compute_nodes.create_column(col.copy())
def upgrade(migrate_engine):
    """Add a project_domain_id column to the instances tables."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    # Local renamed to match the actual column name.
    project_domain_id = Column('project_domain_id', String(255))
    instances.create_column(project_domain_id)
    shadow_instances.create_column(project_domain_id.copy())
def upgrade(migrate_engine):
    """Store the PCI device NUMA node in a new numa_node column."""
    meta = MetaData(bind=migrate_engine)

    numa_node = Column('numa_node', Integer, default=None)

    pci_devices = Table('pci_devices', meta, autoload=True)
    pci_devices.create_column(numa_node)

    shadow_pci_devices = Table('shadow_pci_devices', meta, autoload=True)
    shadow_pci_devices.create_column(numa_node.copy())
def upgrade(migrate_engine):
    """Add live-migration progress counters to the migrations tables."""
    meta = MetaData(bind=migrate_engine)
    migrations = Table('migrations', meta, autoload=True)
    shadow_migrations = Table('shadow_migrations', meta, autoload=True)

    # memory_total/processed/remaining then disk_total/processed/remaining,
    # matching the original column order.
    for prefix in ('memory', 'disk'):
        for suffix in ('total', 'processed', 'remaining'):
            col = Column('%s_%s' % (prefix, suffix), BigInteger,
                         nullable=True)
            migrations.create_column(col)
            shadow_migrations.create_column(col.copy())
def upgrade(migrate_engine):
    """Add migration_type and hidden columns to the migrations tables."""
    meta = MetaData(bind=migrate_engine)
    migrations = Table("migrations", meta, autoload=True)
    shadow_migrations = Table("shadow_migrations", meta, autoload=True)

    # The enum type must exist before any column can reference it.
    enum = Enum("migration", "resize", "live-migration", "evacuation",
                metadata=meta, name="migration_type")
    enum.create()

    def _add(column):
        # Idempotent column add across both tables.
        if not hasattr(migrations.c, column.name):
            migrations.create_column(column)
        if not hasattr(shadow_migrations.c, column.name):
            shadow_migrations.create_column(column.copy())

    _add(Column("migration_type", enum, nullable=True))
    _add(Column("hidden", Boolean, default=False))
def copy_column(column: sa.Column) -> sa.Column:
    """Copy a column, set some properties on it for history table creation.

    The copy loses uniqueness and (server) defaults; the original column
    keeps a reference to its copy in ``info['history_copy']``.
    """
    duplicate = column.copy()
    column.info['history_copy'] = duplicate
    # Re-attach foreign keys by target name rather than by column object.
    for fk in column.foreign_keys:
        duplicate.append_foreign_key(sa.ForeignKey(fk.target_fullname))
    duplicate.unique = False
    duplicate.default = None
    duplicate.server_default = None
    return duplicate
def upgrade(migrate_engine):
    """Function adds key_pairs type field."""
    meta = MetaData(bind=migrate_engine)

    # New rows default to the SSH keypair type at the server level.
    keypair_type = Column('type', String(16), nullable=False,
                          server_default=keypair.KEYPAIR_TYPE_SSH)

    key_pairs = Table('key_pairs', meta, autoload=True)
    key_pairs.create_column(keypair_type)

    shadow_key_pairs = Table('shadow_key_pairs', meta, autoload=True)
    shadow_key_pairs.create_column(keypair_type.copy())
def upgrade(engine):
    """Add a nullable console_passwd column to the instances tables."""
    meta = MetaData(bind=engine)
    # Same column for the live table and its shadow copy; tables that
    # already carry it are skipped so re-runs are harmless.
    for name in ('instances', 'shadow_instances'):
        table = Table(name, meta, autoload=True)
        if not hasattr(table.c, 'console_passwd'):
            table.create_column(Column('console_passwd', String(256),
                                       nullable=True))
def upgrade(engine):
    """Add a nullable request_id column to the pci_devices tables."""
    meta = MetaData(bind=engine)
    for table_name in ('pci_devices', 'shadow_pci_devices'):
        table = Table(table_name, meta, autoload=True)
        if hasattr(table.c, 'request_id'):
            continue  # already migrated; keep the upgrade idempotent
        table.create_column(Column('request_id', String(36), nullable=True))
def upgrade(migrate_engine):
    """Function adds ephemeral storage encryption key uuid field."""
    meta = MetaData(bind=migrate_engine)
    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    for table in (instances, shadow_instances):
        table.create_column(Column('ephemeral_key_uuid', String(36)))

    # Explicitly null out the new column on all existing rows.
    for table in (instances, shadow_instances):
        migrate_engine.execute(table.update().values(ephemeral_key_uuid=None))
def upgrade(migrate_engine):
    """Add a tag column to the virtual_interfaces and block_device_mapping
    tables (and their shadow copies).

    The hasattr() checks keep the migration idempotent.
    """
    # The original created a MetaData bound to the engine but never used
    # it (tables are reflected via utils.get_table); the dead local has
    # been removed. The four identical stanzas are collapsed into a loop
    # preserving the original table order.
    prefix = api._SHADOW_TABLE_PREFIX
    tag = Column('tag', String(255))
    table_names = ('virtual_interfaces',
                   '%svirtual_interfaces' % prefix,
                   'block_device_mapping',
                   '%sblock_device_mapping' % prefix)
    for name in table_names:
        table = utils.get_table(migrate_engine, name)
        if not hasattr(table.c, 'tag'):
            table.create_column(tag.copy())
def upgrade(migrate_engine):
    """Function adds ephemeral storage encryption key uuid field."""
    meta = MetaData(bind=migrate_engine)

    key_col = Column('ephemeral_key_uuid', String(36))

    instances = Table('instances', meta, autoload=True)
    instances.create_column(key_col)

    shadow_instances = Table('shadow_instances', meta, autoload=True)
    shadow_instances.create_column(key_col.copy())

    # Backfill existing rows with an explicit NULL.
    migrate_engine.execute(
        instances.update().values(ephemeral_key_uuid=None))
    migrate_engine.execute(
        shadow_instances.update().values(ephemeral_key_uuid=None))
def upgrade(migrate_engine):
    """Create the pci_devices table (plus its shadow table) and add a
    pci_stats column to compute_nodes and shadow_compute_nodes.
    """
    meta = MetaData(bind=migrate_engine)

    # The unique key spans compute_node_id/address/deleted; 'deleted' is
    # part of the key (soft-delete marker column, default 0).
    pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
    pci_devices = Table('pci_devices', meta,
                        Column('created_at', DateTime(timezone=False)),
                        Column('updated_at', DateTime(timezone=False)),
                        Column('deleted_at', DateTime(timezone=False)),
                        Column('deleted', Integer, default=0, nullable=False),
                        Column('id', Integer, primary_key=True),
                        Column('compute_node_id', Integer, nullable=False),
                        Column('address', String(12), nullable=False),
                        Column('product_id', String(4)),
                        Column('vendor_id', String(4)),
                        Column('dev_type', String(8)),
                        Column('dev_id', String(255)),
                        Column('label', String(255), nullable=False),
                        Column('status', String(36), nullable=False),
                        Column('extra_info', Text, nullable=True),
                        Column('instance_uuid', String(36), nullable=True),
                        Index('ix_pci_devices_compute_node_id_deleted',
                              'compute_node_id', 'deleted'),
                        Index('ix_pci_devices_instance_uuid_deleted',
                              'instance_uuid', 'deleted'),
                        UniqueConstraint('compute_node_id', 'address',
                                         'deleted',
                                         name=pci_devices_uc_name),
                        mysql_engine='InnoDB',
                        mysql_charset='utf8')

    try:
        pci_devices.create()
        # Mirror the new table into the shadow (archive) schema as well.
        utils.create_shadow_table(migrate_engine, table=pci_devices)
    except Exception:
        # Log with traceback, then re-raise so the migration fails loudly.
        LOG.exception(_("Exception while creating table 'pci_devices'."))
        raise

    try:
        compute_nodes = utils.get_table(migrate_engine, 'compute_nodes')
        # Free-form text blob for per-node PCI device stats.
        # NOTE(review): the serialization format is not visible here.
        pci_stats = Column('pci_stats', Text, nullable=True)
        compute_nodes.create_column(pci_stats)
        shadow_compute_nodes = utils.get_table(
            migrate_engine, api._SHADOW_TABLE_PREFIX + 'compute_nodes')
        shadow_compute_nodes.create_column(pci_stats.copy())
    except Exception:
        LOG.exception(_("Exception for adding pci stats to compute node."))
        raise
def upgrade(migrate_engine):
    """Convert the key_pairs 'type' column to the keypair_types enum."""
    meta = MetaData(bind=migrate_engine)
    key_pairs = Table("key_pairs", meta, autoload=True)
    shadow_key_pairs = Table("shadow_key_pairs", meta, autoload=True)

    # The enum type must be created before any column can use it.
    enum = Enum("ssh", "x509", metadata=meta, name="keypair_types")
    enum.create()

    # Drop any pre-existing plain 'type' column before re-adding it with
    # the enum type.
    for table in (key_pairs, shadow_key_pairs):
        if hasattr(table.c, "type"):
            table.c.type.drop()

    keypair_type = Column("type", enum, nullable=False,
                          server_default=keypair.KEYPAIR_TYPE_SSH)
    key_pairs.create_column(keypair_type)
    shadow_key_pairs.create_column(keypair_type.copy())
def upgrade(migrate_engine):
    """Store the PCI device parent address in a new parent_addr column."""
    meta = MetaData(bind=migrate_engine)

    pci_devices = Table('pci_devices', meta, autoload=True)
    shadow_pci_devices = Table('shadow_pci_devices', meta, autoload=True)

    parent_addr = Column('parent_addr', String(12), nullable=True)
    if not hasattr(pci_devices.c, 'parent_addr'):
        pci_devices.create_column(parent_addr)
    if not hasattr(shadow_pci_devices.c, 'parent_addr'):
        shadow_pci_devices.create_column(parent_addr.copy())

    # Composite index on the live table only (shadow table is not indexed).
    Index('ix_pci_devices_compute_node_id_parent_addr_deleted',
          pci_devices.c.compute_node_id,
          pci_devices.c.parent_addr,
          pci_devices.c.deleted).create(migrate_engine)
def upgrade(migrate_engine):
    """Add a cleaned flag to the instances tables and index it."""
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    cleaned = Column('cleaned', Integer, default=0)
    instances.create_column(cleaned)
    shadow_instances.create_column(cleaned.copy())

    Index('instances_host_deleted_cleaned_idx',
          instances.c.host, instances.c.deleted,
          instances.c.cleaned).create(migrate_engine)

    # Rows that were already deleted count as cleaned.
    for table in (instances, shadow_instances):
        migrate_engine.execute(
            table.update().where(table.c.deleted > 0).values(cleaned=1))
def upgrade(migrate_engine):
    """Replace the key_pairs 'type' column with a keypair_types enum."""
    meta = MetaData(bind=migrate_engine)
    tables = [Table('key_pairs', meta, autoload=True),
              Table('shadow_key_pairs', meta, autoload=True)]

    # The database enum type has to exist before the columns are added.
    enum_type = Enum('ssh', 'x509', metadata=meta, name='keypair_types')
    enum_type.create()

    # Remove any pre-existing plain 'type' column first.
    for table in tables:
        if hasattr(table.c, 'type'):
            table.c.type.drop()
    for table in tables:
        table.create_column(Column('type', enum_type, nullable=False,
                                   server_default=keypair.KEYPAIR_TYPE_SSH))
def upgrade(migrate_engine):
    """Add a cleaned column to instances and shadow_instances."""
    meta = MetaData()
    meta.bind = migrate_engine

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    cleaned_column = Column('cleaned', Integer, default=0)
    instances.create_column(cleaned_column)
    shadow_instances.create_column(cleaned_column.copy())

    index = Index('instances_host_deleted_cleaned_idx',
                  instances.c.host,
                  instances.c.deleted,
                  instances.c.cleaned)
    index.create(migrate_engine)

    # Mark already-deleted rows as cleaned in both tables.
    mark_cleaned = instances.update().where(
        instances.c.deleted > 0).values(cleaned=1)
    migrate_engine.execute(mark_cleaned)
    mark_cleaned_shadow = shadow_instances.update().where(
        shadow_instances.c.deleted > 0).values(cleaned=1)
    migrate_engine.execute(mark_cleaned_shadow)
def upgrade(migrate_engine):
    """Add a host column to compute_nodes plus a host/hostname unique key."""
    meta = MetaData()
    meta.bind = migrate_engine

    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    # NOTE(sbauza): Old compute nodes can report stats without this field,
    # we need to set it as nullable
    host = Column('host', String(255), nullable=True)
    if not hasattr(compute_nodes.c, 'host'):
        compute_nodes.create_column(host)
    if not hasattr(shadow_compute_nodes.c, 'host'):
        shadow_compute_nodes.create_column(host.copy())

    # NOTE(sbauza): Populate the host field with the value from the
    # services table will be done at the ComputeNode object level when save()
    unique_key = UniqueConstraint(
        'host', 'hypervisor_hostname', table=compute_nodes,
        name="uniq_compute_nodes0host0hypervisor_hostname")
    unique_key.create()