def downgrade():
    """Move share_type_id from Share Instances to Shares table.

    This method can lead to data loss because only the share_type_id from the
    first share instance is moved to the shares table.
    """

    # NOTE(ganso): Adding back share_type_id to the shares table NOT as a
    # foreign key, as it was before.
    op.add_column('shares',
                  sa.Column('share_type_id', sa.String(36), nullable=True))
    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    # Copy each share's first instance's share_type_id back onto the
    # share row itself.
    for share in connection.execute(shares_table.select()):
        instance = connection.execute(share_instances_table.select().where(
            share['id'] == share_instances_table.c.share_id)).first()
        # NOTE(review): assumes every share has at least one instance;
        # .first() would return None otherwise and the subscripts below
        # would fail -- confirm against the data model.
        # pylint: disable=no-value-for-parameter
        op.execute(shares_table.update().where(
            shares_table.c.id == instance['share_id']).values(
                {'share_type_id': instance['share_type_id']}))

    # The FK must be dropped before the column it constrains.
    op.drop_constraint('si_st_id_fk', 'share_instances', type_='foreignkey')
    op.drop_column('share_instances', 'share_type_id')
def downgrade():
    """Remove 'share_instance_access_map' table and add 'state' column back.

    This method can lead to data loss because only first state is saved in
    share_access_map table.
    """
    op.add_column('share_access_map', Column('state', String(length=255)))

    # NOTE(u_glide): Move all states from 'share_instance_access_map'
    # to 'share_access_map'
    connection = op.get_bind()
    access_table = utils.load_table('share_access_map', connection)
    instance_access_table = utils.load_table('share_instance_access_map',
                                             connection)

    share_access_rules = connection.execute(
        access_table.select().where(access_table.c.deleted == "False"))

    for access_rule in share_access_rules:
        # Only consider live (not soft-deleted) instance mappings so a
        # deleted row's state cannot win over an active one. This matches
        # the companion downgrade that also filters on deleted == "False".
        access_mapping = connection.execute(
            instance_access_table.select().where(
                instance_access_table.c.deleted == "False").where(
                instance_access_table.c.access_id == access_rule['id'])
        ).first()

        # A rule without any live instance mapping has no state to copy;
        # previously this raised TypeError on the None subscript below.
        if access_mapping is None:
            continue

        # pylint: disable=no-value-for-parameter
        op.execute(
            access_table.update().where(
                access_table.c.id == access_rule['id']
            ).values({'state': access_mapping['state']})
        )

    op.drop_table('share_instance_access_map')
def downgrade():
    """Fold the availability_zones table back into plain AZ name columns.

    Re-creates the old 'availability_zone' string columns on 'services'
    and 'share_instances', copies each AZ's name back to the rows that
    referenced it by id, then drops the id columns and the
    'availability_zones' table.
    """
    connection = op.get_bind()

    # Create old AZ fields
    op.add_column('services', Column('availability_zone', String(length=255)))
    op.add_column('share_instances',
                  Column('availability_zone', String(length=255)))

    # Migrate data
    az_table = utils.load_table('availability_zones', connection)
    share_instances_table = utils.load_table('share_instances', connection)
    services_table = utils.load_table('services', connection)

    # Denormalize: write each AZ's name onto every referencing row.
    for az in connection.execute(az_table.select()):
        op.execute(
            share_instances_table.update().where(
                share_instances_table.c.availability_zone_id == az.id
            ).values({'availability_zone': az.name})
        )
        op.execute(
            services_table.update().where(
                services_table.c.availability_zone_id == az.id
            ).values({'availability_zone': az.name})
        )

    # Remove AZ_id columns and AZ table
    # Foreign keys must be dropped before the columns they constrain.
    op.drop_constraint('service_az_id_fk', 'services', type_='foreignkey')
    op.drop_column('services', 'availability_zone_id')
    op.drop_constraint('si_az_id_fk', 'share_instances', type_='foreignkey')
    op.drop_column('share_instances', 'availability_zone_id')
    op.drop_table('availability_zones')
def downgrade():
    """Move share_type_id back from share instances onto shares.

    Potentially lossy: each share keeps only the share_type_id of the
    first instance found for it.
    """

    # NOTE(ganso): share_type_id is restored on 'shares' as a plain
    # column, not a foreign key, matching the pre-upgrade schema.
    op.add_column(
        "shares",
        sa.Column("share_type_id", sa.String(36), nullable=True))

    bind = op.get_bind()
    shares = utils.load_table("shares", bind)
    instances = utils.load_table("share_instances", bind)

    for share_row in bind.execute(shares.select()):
        first_instance = bind.execute(
            instances.select().where(
                share_row["id"] == instances.c.share_id)).first()
        op.execute(
            shares.update()
            .where(shares.c.id == first_instance["share_id"])
            .values({"share_type_id": first_instance["share_type_id"]}))

    # Drop the FK before the column it constrains.
    op.drop_constraint("si_st_id_fk", "share_instances", type_="foreignkey")
    op.drop_column("share_instances", "share_type_id")
Example #5
0
    def setup_upgrade_data(self, engine):
        """Insert fake volume types and their extra specs for the upgrade."""
        self.volume_types, self.extra_specs = self._get_fake_data()

        # Volume types go in before the extra specs that refer to them.
        vt_table = utils.load_table('volume_types', engine)
        engine.execute(vt_table.insert(self.volume_types))

        es_table = utils.load_table('volume_type_extra_specs', engine)
        engine.execute(es_table.insert(self.extra_specs))
Example #6
0
def remove_snapshot_instances_table(connection):
    """Collapse share_snapshot_instances back into share_snapshots.

    Re-adds 'status' and 'progress' columns to 'share_snapshots', copies
    those values from each snapshot instance onto its parent snapshot,
    then drops the 'share_snapshot_instances' table.

    :param connection: SQLAlchemy connection used for the data migration.
    """
    # batch_alter_table keeps the DDL SQLite-compatible.
    with op.batch_alter_table("share_snapshots") as batch_op:
        batch_op.add_column(Column('status', String(length=255)))
        batch_op.add_column(Column('progress', String(length=255)))

    snapshots_table = utils.load_table('share_snapshots', connection)
    snapshots_inst_table = utils.load_table('share_snapshot_instances',
                                            connection)

    # NOTE(review): when a snapshot has several instances, the last one
    # iterated wins, since each instance overwrites the snapshot row.
    for snapshot_instance in connection.execute(snapshots_inst_table.select()):
        snapshot = connection.execute(
            snapshots_table.select().where(
                snapshots_table.c.id == snapshot_instance.snapshot_id)
        ).first()

        op.execute(
            snapshots_table.update().where(
                snapshots_table.c.id == snapshot.id
            ).values(
                {
                    'status': snapshot_instance['status'],
                    'progress': snapshot_instance['progress'],
                }
            )
        )

    op.drop_table('share_snapshot_instances')
    def setup_upgrade_data(self, engine):
        """Create two shares, one instance each, and one export location per instance."""
        shares = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}]
        instances = [
            {'id': 'foo_share_instance_id_oof', 'share_id': shares[0]['id']},
            {'id': 'bar_share_instance_id_rab', 'share_id': shares[1]['id']},
        ]
        locations = [
            {'id': 1, 'path': '/1', 'share_instance_id': instances[0]['id']},
            {'id': 2, 'path': '/2', 'share_instance_id': instances[1]['id']},
        ]

        # Insert parents before children: shares -> instances -> locations.
        for table_name, rows in (('shares', shares),
                                 ('share_instances', instances),
                                 (self.el_table_name, locations)):
            table = utils.load_table(table_name, engine)
            for row in rows:
                engine.execute(table.insert(row))
    def setup_upgrade_data(self, engine):
        """Insert one share, share instance, snapshot, and snapshot instance."""
        share = {'id': 'new_share_id'}
        instance = {'id': 'new_share_instance_id', 'share_id': share['id']}
        snapshot = {'id': 'new_snapshot_id', 'share_id': share['id']}
        snapshot_instance = {
            'id': 'new_snapshot_instance_id',
            'snapshot_id': snapshot['id'],
            'share_instance_id': instance['id'],
        }

        # Insert parents before children.
        for table_name, row in (('shares', share),
                                ('share_instances', instance),
                                ('share_snapshots', snapshot),
                                ('share_snapshot_instances',
                                 snapshot_instance)):
            engine.execute(utils.load_table(table_name, engine).insert(row))
def downgrade_export_locations_table(connection):
    """Rename export locations back to the share-based schema.

    Renames 'share_instance_export_locations' to 'share_export_locations',
    re-adds the 'share_id' FK column, fills it from each location's share
    instance, then drops the 'share_instance_id' column and its FK.

    :param connection: SQLAlchemy connection used for the data migration.
    """
    op.rename_table('share_instance_export_locations',
                    'share_export_locations')
    op.add_column(
        'share_export_locations',
        Column('share_id', String(36),
               ForeignKey('shares.id', name="sel_id_fk"))
    )

    # Convert share_instance_id to share_id
    share_el_table = utils.load_table('share_export_locations', connection)
    share_instances_table = utils.load_table('share_instances', connection)
    for export in connection.execute(share_el_table.select()):
        share_instance = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.id == export.share_instance_id)
        ).first()

        op.execute(
            share_el_table.update().where(
                share_el_table.c.id == export.id
            ).values({'share_id': six.text_type(share_instance.share_id)})
        )

    # batch_alter_table keeps the drop operations SQLite-compatible.
    with op.batch_alter_table("share_export_locations") as batch_op:
        batch_op.drop_constraint('sel_instance_id_fk', type_='foreignkey')
        batch_op.drop_column('share_instance_id')
def downgrade_export_locations_table(connection):
    """Rename export locations back to the share-based schema.

    Renames 'share_instance_export_locations' to 'share_export_locations',
    re-adds the 'share_id' FK column, fills it from each location's share
    instance, then drops the 'share_instance_id' column and its FK.

    :param connection: SQLAlchemy connection used for the data migration.
    """
    op.rename_table('share_instance_export_locations',
                    'share_export_locations')
    op.add_column(
        'share_export_locations',
        Column('share_id', String(36), ForeignKey('shares.id',
                                                  name="sel_id_fk")))

    # Convert share_instance_id to share_id
    share_el_table = utils.load_table('share_export_locations', connection)
    share_instances_table = utils.load_table('share_instances', connection)
    for export in connection.execute(share_el_table.select()):
        share_instance = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.id ==
                export.share_instance_id)).first()

        # pylint: disable=no-value-for-parameter
        op.execute(share_el_table.update().where(
            share_el_table.c.id == export.id).values(
                {'share_id': str(share_instance.share_id)}))

    # batch_alter_table keeps the drop operations SQLite-compatible.
    with op.batch_alter_table("share_export_locations") as batch_op:
        batch_op.drop_constraint('sel_instance_id_fk', type_='foreignkey')
        batch_op.drop_column('share_instance_id')
def upgrade():
    """Add a per-rule 'state' column to share_instance_access_map.

    Seeds the new column from each available, live share instance's
    overall access_rules_status and remaps that status to the new
    vocabulary on the instance row.
    """
    op.add_column('share_instance_access_map',
                  Column('state', String(length=255),
                         default=constants.ACCESS_STATE_QUEUED_TO_APPLY))

    connection = op.get_bind()
    share_instances_table = utils.load_table('share_instances', connection)
    instance_access_map_table = utils.load_table('share_instance_access_map',
                                                 connection)

    # Only available, not soft-deleted instances are migrated.
    instances_query = (
        share_instances_table.select().where(
            share_instances_table.c.status ==
            constants.STATUS_AVAILABLE).where(
            share_instances_table.c.deleted == 'False')
    )

    for instance in connection.execute(instances_query):
        access_rule_status = instance['access_rules_status']
        # Every rule of the instance inherits the translated status.
        op.execute(
            instance_access_map_table.update().where(
                instance_access_map_table.c.share_instance_id == instance['id']
            ).values({
                'state': access_rules_status_to_state_mapping[
                    access_rule_status],
            })
        )
        op.execute(
            share_instances_table.update().where(
                share_instances_table.c.id == instance['id']
            ).values({
                'access_rules_status': access_rules_status_upgrade_mapping[
                    access_rule_status],
            })
        )
Example #12
0
    def setup_upgrade_data(self, engine):
        """Seed two shares, their instances, and one export location each."""
        # Inserted in dependency order: shares -> instances -> locations.
        share_rows = [{'id': 'foo_share_id'}, {'id': 'bar_share_id'}]
        shares_tbl = utils.load_table('shares', engine)
        for row in share_rows:
            engine.execute(shares_tbl.insert(row))

        instance_rows = [
            {'id': 'foo_share_instance_id_oof',
             'share_id': share_rows[0]['id']},
            {'id': 'bar_share_instance_id_rab',
             'share_id': share_rows[1]['id']},
        ]
        instances_tbl = utils.load_table('share_instances', engine)
        for row in instance_rows:
            engine.execute(instances_tbl.insert(row))

        location_rows = [
            {'id': 1, 'path': '/1',
             'share_instance_id': instance_rows[0]['id']},
            {'id': 2, 'path': '/2',
             'share_instance_id': instance_rows[1]['id']},
        ]
        locations_tbl = utils.load_table(self.el_table_name, engine)
        for row in location_rows:
            engine.execute(locations_tbl.insert(row))
def upgrade():
    """Move share_type_id from Shares to Share Instances table."""

    # NOTE(ganso): Adding share_type_id as a foreign key to share_instances
    # table. Please note that share_type_id is NOT a foreign key in shares
    # table prior to this migration.
    op.add_column(
        'share_instances',
        sa.Column('share_type_id',
                  sa.String(36),
                  sa.ForeignKey('share_types.id', name='si_st_id_fk'),
                  nullable=True))
    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    # Copy each instance's parent share's share_type_id onto the instance.
    for instance in connection.execute(share_instances_table.select()):
        share = connection.execute(shares_table.select().where(
            instance['share_id'] == shares_table.c.id)).first()
        # NOTE(review): assumes every instance references an existing
        # share; .first() would return None otherwise -- confirm.
        # pylint: disable=no-value-for-parameter
        op.execute(share_instances_table.update().where(
            share_instances_table.c.id == instance['id']).values(
                {'share_type_id': share['share_type_id']}))

    op.drop_column('shares', 'share_type_id')
def upgrade():
    """Add a per-rule 'state' column to share_instance_access_map.

    Seeds the new column from each available, live share instance's
    overall access_rules_status and remaps that status to the new
    vocabulary on the instance row.
    """
    op.add_column(
        'share_instance_access_map',
        Column('state',
               String(length=255),
               default=constants.ACCESS_STATE_QUEUED_TO_APPLY))

    connection = op.get_bind()
    share_instances_table = utils.load_table('share_instances', connection)
    instance_access_map_table = utils.load_table('share_instance_access_map',
                                                 connection)

    # Only available, not soft-deleted instances are migrated.
    instances_query = (share_instances_table.select().where(
        share_instances_table.c.status == constants.STATUS_AVAILABLE).where(
            share_instances_table.c.deleted == 'False'))

    for instance in connection.execute(instances_query):
        access_rule_status = instance['access_rules_status']
        # Every rule of the instance inherits the translated status.
        op.execute(instance_access_map_table.update().where(
            instance_access_map_table.c.share_instance_id ==
            instance['id']).values({
                'state':
                access_rules_status_to_state_mapping[access_rule_status],
            }))
        op.execute(share_instances_table.update().where(
            share_instances_table.c.id == instance['id']).values({
                'access_rules_status':
                access_rules_status_upgrade_mapping[access_rule_status],
            }))
def downgrade():
    """Move share_type_id from Share Instances to Shares table.

    This method can lead to data loss because only the share_type_id from the
    first share instance is moved to the shares table.
    """

    # NOTE(ganso): Adding back share_type_id to the shares table NOT as a
    # foreign key, as it was before.
    op.add_column(
        'shares',
        sa.Column('share_type_id', sa.String(36), nullable=True))
    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    # Copy each share's first instance's share_type_id back onto the
    # share row itself.
    for share in connection.execute(shares_table.select()):
        instance = connection.execute(share_instances_table.select().where(
            share['id'] == share_instances_table.c.share_id)).first()
        # NOTE(review): assumes every share has at least one instance;
        # .first() would return None otherwise -- confirm.
        # pylint: disable=no-value-for-parameter
        op.execute(shares_table.update().where(
            shares_table.c.id == instance['share_id']).values(
            {'share_type_id': instance['share_type_id']}))

    # The FK must be dropped before the column it constrains.
    op.drop_constraint('si_st_id_fk', 'share_instances', type_='foreignkey')
    op.drop_column('share_instances', 'share_type_id')
Example #16
0
def downgrade():
    """Drop access_rules_status and restore the per-rule 'state' column.

    For every available, live share instance: its live access rules get
    state 'active' if the instance's rules were active, 'error' otherwise.
    """
    op.add_column('share_instance_access_map',
                  Column('state', String(length=255)))

    connection = op.get_bind()
    share_instances_table = utils.load_table('share_instances', connection)
    instance_access_table = utils.load_table('share_instance_access_map',
                                             connection)

    # Only available, not soft-deleted instances are considered.
    instances_query = (share_instances_table.select().where(
        share_instances_table.c.status == constants.STATUS_AVAILABLE).where(
            share_instances_table.c.deleted == 'False'))

    for instance in connection.execute(instances_query):

        # NOTE(u_glide): We cannot determine if a rule is applied or not in
        # Manila, so administrator should manually handle such access rules.
        if instance['access_rules_status'] == 'active':
            state = 'active'
        else:
            state = 'error'

        op.execute(instance_access_table.update().where(
            instance_access_table.c.share_instance_id == instance['id']).where(
                instance_access_table.c.deleted == 'False').values(
                    {'state': state}))

    op.drop_column('share_instances', 'access_rules_status')
def downgrade():
    """Drop access_rules_status and restore the per-rule 'state' column.

    For every available, live share instance, its live access rules get
    the state mapped from the instance's access_rules_status via
    downgrade_data_mapping.
    """
    op.add_column(
        'share_instance_access_map',
        Column('state', String(length=255))
    )

    connection = op.get_bind()
    share_instances_table = utils.load_table('share_instances', connection)
    instance_access_table = utils.load_table('share_instance_access_map',
                                             connection)

    # Only available, not soft-deleted instances are considered.
    instances_query = (
        share_instances_table.select()
        .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)
        .where(share_instances_table.c.deleted == 'False')
    )

    for instance in connection.execute(instances_query):

        state = downgrade_data_mapping[instance['access_rules_status']]

        op.execute(
            instance_access_table.update().where(
                instance_access_table.c.share_instance_id == instance['id']
            ).where(instance_access_table.c.deleted == 'False').values(
                {'state': state}
            )
        )

    op.drop_column('share_instances', 'access_rules_status')
Example #18
0
    def setup_upgrade_data(self, engine):
        """Seed one share, share instance, snapshot, and snapshot instance."""
        # Dependency order: share -> instance/snapshot -> snapshot instance.
        share_row = {'id': 'new_share_id'}
        engine.execute(
            utils.load_table('shares', engine).insert(share_row))

        instance_row = {
            'id': 'new_share_instance_id',
            'share_id': share_row['id'],
        }
        engine.execute(
            utils.load_table('share_instances', engine).insert(instance_row))

        snapshot_row = {
            'id': 'new_snapshot_id',
            'share_id': share_row['id'],
        }
        engine.execute(
            utils.load_table('share_snapshots', engine).insert(snapshot_row))

        snap_instance_row = {
            'id': 'new_snapshot_instance_id',
            'snapshot_id': snapshot_row['id'],
            'share_instance_id': instance_row['id'],
        }
        engine.execute(
            utils.load_table('share_snapshot_instances',
                             engine).insert(snap_instance_row))
    def setup_upgrade_data(self, engine):
        """Create a share network, a share server on it, and two allocations."""
        user_id = "user_id"
        project_id = "project_id"
        server_id = "share_server_id_foo"

        # Share network first: the share server references it.
        network_row = {"id": self.sn_ids[0], "user_id": user_id,
                       "project_id": project_id}
        engine.execute(
            utils.load_table(self.sn_table_name, engine).insert(network_row))

        server_row = {
            "id": server_id,
            "share_network_id": network_row["id"],
            "host": "fake_host",
            "status": "active",
        }
        engine.execute(
            utils.load_table("share_servers", engine).insert(server_row))

        allocation_rows = [
            {"id": self.na_ids[0], "share_server_id": server_id,
             "ip_address": "1.1.1.1"},
            {"id": self.na_ids[1], "share_server_id": server_id,
             "ip_address": "2.2.2.2"},
        ]
        engine.execute(
            utils.load_table(self.na_table_name,
                             engine).insert(allocation_rows))
Example #20
0
def downgrade():
    """Remove 'share_instance_access_map' table and add 'state' column back.

    This method can lead to data loss because only first state is saved in
    share_access_map table.
    """
    op.add_column('share_access_map', Column('state', String(length=255)))

    # NOTE(u_glide): Move all states from 'share_instance_access_map'
    # to 'share_access_map'
    connection = op.get_bind()
    access_table = utils.load_table('share_access_map', connection)
    instance_access_table = utils.load_table('share_instance_access_map',
                                             connection)

    for access_rule in connection.execute(access_table.select()):
        # The first live (not soft-deleted) instance mapping wins.
        access_mapping = connection.execute(instance_access_table.select(
        ).where(instance_access_table.c.deleted == "False").where(
            instance_access_table.c.access_id == access_rule['id'])).first()
        # NOTE(review): assumes every access rule has at least one live
        # instance mapping; .first() returns None otherwise and the
        # subscript below would fail -- confirm.

        op.execute(access_table.update().where(
            access_table.c.id == access_rule['id']).values(
                {'state': access_mapping['state']}))

    op.drop_table('share_instance_access_map')
def upgrade():
    """Add a non-nullable cast_rules_to_readonly column to share instances.

    The column is seeded False everywhere, then set True on secondary
    replicas of live shares with 'readable' replication, and finally
    made NOT NULL.
    """

    LOG.info("Adding cast_rules_to_readonly column to share instances.")

    op.add_column('share_instances',
                  sa.Column('cast_rules_to_readonly', sa.Boolean,
                            default=False))

    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    # First, set the value of ``cast_rules_to_readonly`` in every existing
    # share instance to False
    op.execute(
        share_instances_table.update().values({
            'cast_rules_to_readonly': False,
        })
    )

    # Set the value of ``cast_rules_to_readonly`` to True for secondary
    # replicas in 'readable' replication relationships
    replicated_shares_query = (
        shares_table.select()
        .where(shares_table.c.deleted == 'False')
        .where(shares_table.c.replication_type
               == constants.REPLICATION_TYPE_READABLE)
    )

    for replicated_share in connection.execute(replicated_shares_query):
        # NOTE (gouthamr): Only secondary replicas that are not undergoing a
        # 'replication_change' (promotion to active) are considered. When the
        # replication change is complete, the share manager will take care
        # of ensuring the correct values for the replicas that were involved
        # in the transaction.
        secondary_replicas_query = (
            share_instances_table.select().where(
                share_instances_table.c.deleted == 'False').where(
                share_instances_table.c.replica_state
                    != constants.REPLICA_STATE_ACTIVE).where(
                share_instances_table.c.status
                    != constants.STATUS_REPLICATION_CHANGE).where(
                replicated_share['id'] == share_instances_table.c.share_id
            )
        )
        for replica in connection.execute(secondary_replicas_query):
            op.execute(
                share_instances_table.update().where(
                    share_instances_table.c.id == replica.id
                ).values({
                    'cast_rules_to_readonly': True,
                })
            )

    # Now that every row has a value, the column can be made NOT NULL.
    op.alter_column('share_instances',
                    'cast_rules_to_readonly',
                    existing_type=sa.Boolean,
                    existing_server_default=False,
                    nullable=False)
Example #22
0
    def setup_upgrade_data(self, engine):
        """Seed one share with four instances and their access rules.

        Instances 1-3 are 'available' with active/deleting/error/new rule
        mixes; instance 4 is 'deleting' with no rules, so the upgrade's
        status handling can be checked across all cases.
        """

        share_table = utils.load_table('shares', engine)

        share = {
            'id': 1,
            'share_proto': "NFS",
            'size': 0,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake',
        }

        engine.execute(share_table.insert(share))

        rules1 = [
            {'id': 'r1', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r2', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting',
             'deleted': 'False'},
        ]
        rules2 = [
            {'id': 'r4', 'share_instance_id': 2, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r5', 'share_instance_id': 2, 'state': 'error',
             'deleted': 'False'},
        ]

        rules3 = [
            {'id': 'r6', 'share_instance_id': 3, 'state': 'new',
             'deleted': 'False'},
        ]

        instance_fixtures = [
            {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1,
             'status': 'available', 'rules': rules1},
            {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1,
             'status': 'available', 'rules': rules2},
            {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1,
             'status': 'available', 'rules': rules3},
            {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1,
             'status': 'deleting', 'rules': []},
        ]

        share_instances_table = utils.load_table('share_instances', engine)
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', engine)

        # 'rules' is fixture-only metadata, popped before the instance row
        # is inserted, then inserted into the access-map table itself.
        for fixture in instance_fixtures:
            rules = fixture.pop('rules')
            engine.execute(share_instances_table.insert(fixture))

            for rule in rules:
                engine.execute(share_instances_rules_table.insert(rule))
    def setup_upgrade_data(self, engine):
        """Seed one share with four instances and their access rules.

        Instances 1-3 are 'available' with active/deleting/error/new rule
        mixes; instance 4 is 'deleting' with no rules, so the upgrade's
        status handling can be checked across all cases.
        """

        share_table = utils.load_table('shares', engine)

        share = {
            'id': 1,
            'share_proto': "NFS",
            'size': 0,
            'snapshot_id': None,
            'user_id': 'fake',
            'project_id': 'fake',
        }

        engine.execute(share_table.insert(share))

        rules1 = [
            {'id': 'r1', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r2', 'share_instance_id': 1, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r3', 'share_instance_id': 1, 'state': 'deleting',
             'deleted': 'False'},
        ]
        rules2 = [
            {'id': 'r4', 'share_instance_id': 2, 'state': 'active',
             'deleted': 'False'},
            {'id': 'r5', 'share_instance_id': 2, 'state': 'error',
             'deleted': 'False'},
        ]

        rules3 = [
            {'id': 'r6', 'share_instance_id': 3, 'state': 'new',
             'deleted': 'False'},
        ]

        instance_fixtures = [
            {'id': 1, 'deleted': 'False', 'host': 'fake1', 'share_id': 1,
             'status': 'available', 'rules': rules1},
            {'id': 2, 'deleted': 'False', 'host': 'fake2', 'share_id': 1,
             'status': 'available', 'rules': rules2},
            {'id': 3, 'deleted': 'False', 'host': 'fake3', 'share_id': 1,
             'status': 'available', 'rules': rules3},
            {'id': 4, 'deleted': 'False', 'host': 'fake4', 'share_id': 1,
             'status': 'deleting', 'rules': []},
        ]

        share_instances_table = utils.load_table('share_instances', engine)
        share_instances_rules_table = utils.load_table(
            'share_instance_access_map', engine)

        # 'rules' is fixture-only metadata, popped before the instance row
        # is inserted, then inserted into the access-map table itself.
        for fixture in instance_fixtures:
            rules = fixture.pop('rules')
            engine.execute(share_instances_table.insert(fixture))

            for rule in rules:
                engine.execute(share_instances_rules_table.insert(rule))
    def _load_tables_and_get_data(self, engine):
        """Return all share and share-instance rows for the valid share ids."""
        shares_tbl = utils.load_table("shares", engine)
        instances_tbl = utils.load_table("share_instances", engine)

        wanted = self.valid_share_ids
        shares = engine.execute(
            shares_tbl.select().where(shares_tbl.c.id.in_(wanted))
        ).fetchall()
        share_instances = engine.execute(
            instances_tbl.select().where(
                instances_tbl.c.share_id.in_(wanted))
        ).fetchall()

        return shares, share_instances
Example #25
0
def upgrade():
    """Add a non-nullable cast_rules_to_readonly column to share instances.

    The column is seeded False everywhere, then set True on secondary
    replicas of live shares with 'readable' replication, and finally
    made NOT NULL.
    """

    LOG.info("Adding cast_rules_to_readonly column to share instances.")

    op.add_column(
        'share_instances',
        sa.Column('cast_rules_to_readonly', sa.Boolean, default=False))

    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    # First, set the value of ``cast_rules_to_readonly`` in every existing
    # share instance to False
    # pylint: disable=no-value-for-parameter
    op.execute(share_instances_table.update().values({
        'cast_rules_to_readonly':
        False,
    }))

    # Set the value of ``cast_rules_to_readonly`` to True for secondary
    # replicas in 'readable' replication relationships
    replicated_shares_query = (shares_table.select().where(
        shares_table.c.deleted == 'False').where(
            shares_table.c.replication_type ==
            constants.REPLICATION_TYPE_READABLE))

    for replicated_share in connection.execute(replicated_shares_query):
        # NOTE (gouthamr): Only secondary replicas that are not undergoing a
        # 'replication_change' (promotion to active) are considered. When the
        # replication change is complete, the share manager will take care
        # of ensuring the correct values for the replicas that were involved
        # in the transaction.
        secondary_replicas_query = (share_instances_table.select().where(
            share_instances_table.c.deleted == 'False').where(
                share_instances_table.c.replica_state !=
                constants.REPLICA_STATE_ACTIVE).where(
                    share_instances_table.c.status !=
                    constants.STATUS_REPLICATION_CHANGE).where(
                        replicated_share['id'] ==
                        share_instances_table.c.share_id))
        for replica in connection.execute(secondary_replicas_query):
            # pylint: disable=no-value-for-parameter
            op.execute(share_instances_table.update().where(
                share_instances_table.c.id == replica.id).values({
                    'cast_rules_to_readonly':
                    True,
                }))

    # Now that every row has a value, the column can be made NOT NULL.
    op.alter_column('share_instances',
                    'cast_rules_to_readonly',
                    existing_type=sa.Boolean,
                    existing_server_default=False,
                    nullable=False)
    def check_upgrade(self, engine, _):
        """Verify AZs were moved into the availability_zones table.

        Every AZ row must have a UUID id, a known name, and not be
        soft-deleted; every service must reference an AZ by UUID.
        """
        az_table = utils.load_table("availability_zones", engine)

        for az in engine.execute(az_table.select()):
            self.test_case.assertTrue(uuidutils.is_uuid_like(az.id))
            # assertIn reports both operands on failure, unlike
            # assertTrue(x in y).
            self.test_case.assertIn(az.name, self.valid_az_names)
            self.test_case.assertEqual("False", az.deleted)

        services_table = utils.load_table("services", engine)
        for service in engine.execute(services_table.select()):
            self.test_case.assertTrue(
                uuidutils.is_uuid_like(service.availability_zone_id))
Example #27
0
    def _load_tables_and_get_data(self, engine):
        """Return all share and share-instance rows for the valid share ids."""
        share_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)

        share_query = share_table.select().where(
            share_table.c.id.in_(self.valid_share_ids))
        instance_query = share_instances_table.select().where(
            share_instances_table.c.share_id.in_(self.valid_share_ids))

        shares = engine.execute(share_query).fetchall()
        share_instances = engine.execute(instance_query).fetchall()

        return shares, share_instances
def create_snapshot_instances_table(connection):
    """Create 'share_snapshot_instances' and move snapshot data into it.

    Each snapshot row is copied into the new table and attached to the
    first share instance of its parent share; 'status' and 'progress'
    are then dropped from 'share_snapshots'.
    """
    snapshot_instances_table = op.create_table(
        'share_snapshot_instances',
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', String(length=36), default='False'),
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('snapshot_id', String(length=36),
               ForeignKey('share_snapshots.id', name="ssi_snapshot_fk")),
        Column('share_instance_id', String(length=36),
               ForeignKey('share_instances.id', name="ssi_share_instance_fk")),
        Column('status', String(length=255)),
        Column('progress', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    # Copy data from share_snapshots into the new table.
    rows_to_insert = []
    snapshot_table = utils.load_table('share_snapshots', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    for snapshot in connection.execute(snapshot_table.select()):
        instance_rows = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.share_id == snapshot.share_id))
        rows_to_insert.append({
            'created_at': snapshot.created_at,
            'updated_at': snapshot.updated_at,
            'deleted_at': snapshot.deleted_at,
            'deleted': snapshot.deleted,
            'id': snapshot.id,
            'snapshot_id': snapshot.id,
            'status': snapshot.status,
            'progress': snapshot.progress,
            'share_instance_id': instance_rows.first().id,
        })
    op.bulk_insert(snapshot_instances_table, rows_to_insert)

    # These columns now live on 'share_snapshot_instances'.
    with op.batch_alter_table("share_snapshots") as batch_op:
        batch_op.drop_column('status')
        batch_op.drop_column('progress')
예제 #29
0
    def check_upgrade(self, engine, _):
        """Check AZ rows are valid and services reference a UUID AZ id."""
        az_table = utils.load_table('availability_zones', engine)
        az_rows = engine.execute(az_table.select())
        for az in az_rows:
            self.test_case.assertTrue(uuidutils.is_uuid_like(az.id))
            self.test_case.assertIn(az.name, self.valid_az_names)
            self.test_case.assertEqual('False', az.deleted)

        services_table = utils.load_table('services', engine)
        for svc in engine.execute(services_table.select()):
            self.test_case.assertTrue(
                uuidutils.is_uuid_like(svc.availability_zone_id))
    def setup_upgrade_data(self, engine):
        """Insert a share network and a security service fixture row."""
        user_id = "123456789123456789"
        project_id = "project_id"

        # Create share network data
        share_network_data = {
            "id": "foo_share_network_id_2",
            "user_id": user_id,
            "project_id": project_id,
        }
        sn_table = utils.load_table("share_networks", engine)
        engine.execute(sn_table.insert(share_network_data))

        # Create security_service data
        security_services_data = {
            "id": "foo_security_services_id",
            "type": "foo_type",
            "project_id": project_id,
        }
        ss_table = utils.load_table("security_services", engine)
        engine.execute(ss_table.insert(security_services_data))
예제 #31
0
    def setup_upgrade_data(self, engine):
        """Populate share types, shares, and share instances fixtures."""
        tables_and_rows = (
            ('share_types', self.some_share_types),
            ('shares', self.some_shares),
            ('share_instances', self.some_instances),
        )
        # Insert share types first; shares and instances reference them.
        for table_name, rows in tables_and_rows:
            table = utils.load_table(table_name, engine)
            for row in rows:
                engine.execute(table.insert(row))
예제 #32
0
    def setup_upgrade_data(self, engine):
        """Insert the share type, share, and share instance fixtures."""
        shares_table = utils.load_table('shares', engine)
        instances_table = utils.load_table('share_instances', engine)
        types_table = utils.load_table('share_types', engine)

        # Share types go in first; shares and instances reference them.
        for share_type in self.some_share_types:
            engine.execute(types_table.insert(share_type))

        for share_row in self.some_shares:
            engine.execute(shares_table.insert(share_row))

        for instance_row in self.some_instances:
            engine.execute(instances_table.insert(instance_row))
예제 #33
0
    def check_downgrade(self, engine):
        """Verify share_type_id moved from share instances back to shares."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)

        # BUG FIX: the Python `in` operator applied to a SQLAlchemy column
        # does not build a SQL IN clause (it evaluates the column object's
        # truthiness); use the column's .in_() operator instead.
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertNotIn('share_type_id', instance)

        # BUG FIX: this query must select from the shares table -- after the
        # downgrade 'share_type_id' exists only there, and the filter column
        # (shares_table.c.id) belongs to it.
        for share in engine.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            self.test_case.assertEqual(
                next((x for x in self.some_shares if share['id'] == x['id']),
                     None)['share_type_id'], share['share_type_id'])
예제 #34
0
    def check_upgrade(self, engine, data):
        """Verify new export-location columns and the new metadata table.

        Every export location must have gained 'is_admin_only' (defaulted
        to False) and a UUID-like 'uuid'. Two metadata rows are then
        inserted and read back to validate the metadata table's schema and
        stored values.
        """
        el_table = utils.load_table('share_instance_export_locations', engine)
        for el in engine.execute(el_table.select()):
            self.test_case.assertTrue(hasattr(el, 'is_admin_only'))
            self.test_case.assertTrue(hasattr(el, 'uuid'))
            self.test_case.assertEqual(False, el.is_admin_only)
            self.test_case.assertTrue(uuidutils.is_uuid_like(el.uuid))

        # Write export location metadata
        el_metadata = [
            {
                'key': 'foo_key',
                'value': 'foo_value',
                'export_location_id': 1
            },
            {
                'key': 'bar_key',
                'value': 'bar_value',
                'export_location_id': 2
            },
        ]
        elm_table = utils.load_table(self.elm_table_name, engine)
        engine.execute(elm_table.insert(el_metadata))

        # Verify values of written metadata
        for el_meta_datum in el_metadata:
            el_id = el_meta_datum['export_location_id']
            records = engine.execute(elm_table.select().where(
                elm_table.c.export_location_id == el_id))
            # Exactly one metadata row was written per export location above.
            self.test_case.assertEqual(1, records.rowcount)
            record = records.first()

            # The metadata table must expose exactly these columns: the
            # standard soft-delete bookkeeping plus the key/value payload.
            expected_keys = (
                'id',
                'created_at',
                'updated_at',
                'deleted_at',
                'deleted',
                'export_location_id',
                'key',
                'value',
            )
            self.test_case.assertEqual(len(expected_keys), len(record.keys()))
            for key in expected_keys:
                self.test_case.assertIn(key, record.keys())

            # Every inserted value must round-trip unchanged.
            for k, v in el_meta_datum.items():
                self.test_case.assertTrue(hasattr(record, k))
                self.test_case.assertEqual(v, getattr(record, k))
def upgrade():
    """Transform individual access rules states to 'access_rules_status'.

    WARNING: This method performs lossy converting of existing data in DB.

    Adds an aggregated 'access_rules_status' column to share_instances,
    computes it for each available, non-deleted instance from its rules'
    per-rule 'state' values, then drops the per-rule 'state' column.
    """
    op.add_column(
        'share_instances',
        Column('access_rules_status', String(length=255))
    )

    connection = op.get_bind()
    share_instances_table = utils.load_table('share_instances', connection)
    instance_access_table = utils.load_table('share_instance_access_map',
                                             connection)

    # NOTE(u_glide): Data migrations shouldn't be performed on live clouds
    # because it will lead to unpredictable behaviour of running operations
    # like migration.
    instances_query = (
        share_instances_table.select()
        .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)
        .where(share_instances_table.c.deleted == 'False')
    )

    for instance in connection.execute(instances_query):

        access_mappings_query = instance_access_table.select().where(
            instance_access_table.c.share_instance_id == instance['id']
        ).where(instance_access_table.c.deleted == 'False')

        # Start from ACTIVE and escalate to the "worst" state found among
        # this instance's rules, ranked by the module-level 'priorities'
        # mapping (defined elsewhere in this migration).
        status = constants.STATUS_ACTIVE

        for access_rule in connection.execute(access_mappings_query):

            # Rules being deleted, or in states without a defined priority,
            # do not influence the aggregated status.
            if (access_rule['state'] == constants.STATUS_DELETING or
                    access_rule['state'] not in priorities):
                continue

            if priorities[access_rule['state']] > priorities[status]:
                status = access_rule['state']

        # pylint: disable=no-value-for-parameter
        op.execute(
            share_instances_table.update().where(
                share_instances_table.c.id == instance['id']
            ).values({'access_rules_status': upgrade_data_mapping[status]})
        )

    # The per-rule 'state' column is superseded by the aggregated
    # 'access_rules_status' on share instances.
    op.drop_column('share_instance_access_map', 'state')
    def check_upgrade(self, engine, data):
        """Verify the new 'gateway' column on allocations and networks.

        Confirms the column exists on every row of both tables, then
        inserts one row per table with a gateway value and reads it back.
        """
        na_table = utils.load_table(self.na_table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'gateway'))

        # Create network allocation
        # NOTE(review): 'na' below is the leaked loop variable from the loop
        # above -- this assumes setup data created at least one allocation.
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
            },
        ]
        engine.execute(na_table.insert(network_allocations))

        # Select network allocations with gateway info
        for na in engine.execute(
                na_table.select().where(na_table.c.gateway == '3.3.3.1')):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
            self.test_case.assertEqual(network_allocations[0]['gateway'],
                                       getattr(na, 'gateway'))

        sn_table = utils.load_table(self.sn_table_name, engine)
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))

        # Create share network
        # NOTE(review): same leaked-loop-variable pattern with 'sn' here.
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo',
            },
        ]
        engine.execute(sn_table.insert(share_networks))

        # Select share network
        for sn in engine.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo')):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
            self.test_case.assertEqual(share_networks[0]['gateway'],
                                       getattr(sn, 'gateway'))
예제 #37
0
    def check_upgrade(self, engine, data):
        """Check the 'gateway' column was added and is persisted.

        Asserts the column exists on all rows of both tables, inserts a
        row with a gateway into each, and verifies the value round-trips.
        """
        na_table = utils.load_table(self.na_table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'gateway'))

        # Create network allocation
        # NOTE(review): 'na' is reused from the loop above, so this depends
        # on at least one pre-existing allocation row.
        network_allocations = [
            {
                'id': self.na_ids[2],
                'share_server_id': na.share_server_id,
                'ip_address': '3.3.3.3',
                'gateway': '3.3.3.1',
                'network_type': 'vlan',
                'segmentation_id': 1005,
                'ip_version': 4,
                'cidr': '240.0.0.0/16',
            },
        ]
        engine.execute(na_table.insert(network_allocations))

        # Select network allocations with gateway info
        for na in engine.execute(
                na_table.select().where(na_table.c.gateway == '3.3.3.1')):
            self.test_case.assertTrue(hasattr(na, 'gateway'))
            self.test_case.assertEqual(network_allocations[0]['gateway'],
                                       getattr(na, 'gateway'))

        sn_table = utils.load_table(self.sn_table_name, engine)
        for sn in engine.execute(sn_table.select()):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))

        # Create share network
        # NOTE(review): same reliance on the leaked 'sn' loop variable.
        share_networks = [
            {
                'id': self.sn_ids[1],
                'user_id': sn.user_id,
                'project_id': sn.project_id,
                'gateway': '1.1.1.1',
                'name': 'name_foo',
            },
        ]
        engine.execute(sn_table.insert(share_networks))

        # Select share network
        for sn in engine.execute(
                sn_table.select().where(sn_table.c.name == 'name_foo')):
            self.test_case.assertTrue(hasattr(sn, 'gateway'))
            self.test_case.assertEqual(share_networks[0]['gateway'],
                                       getattr(sn, 'gateway'))
예제 #38
0
    def check_downgrade(self, engine):
        """Verify share_type_id was restored onto shares after downgrade."""
        shares_table = utils.load_table('shares', engine)
        share_instances_table = utils.load_table('share_instances', engine)

        # BUG FIX: `column in list` uses the Python `in` operator on a
        # SQLAlchemy column and never produces a SQL IN clause; the
        # column's .in_() operator is required.
        for instance in engine.execute(share_instances_table.select().where(
                share_instances_table.c.id.in_(self.instance_ids))):
            self.test_case.assertNotIn('share_type_id', instance)

        # BUG FIX: select from the shares table here -- that is where the
        # restored 'share_type_id' column and the filter column live.
        for share in engine.execute(shares_table.select().where(
                shares_table.c.id.in_(self.share_ids))):
            self.test_case.assertEqual(
                next((x for x in self.some_shares if share['id'] == x['id']),
                     None)['share_type_id'],
                share['share_type_id'])
def remove_share_instances_table(connection):
    """Fold each share's first instance back into 'shares' and drop the table.

    Re-adds the per-share columns, copies them from the first matching
    share instance, then drops 'share_instances' entirely.
    """
    with op.batch_alter_table("shares") as batch_op:
        batch_op.add_column(Column('host', String(length=255)))
        batch_op.add_column(Column('status', String(length=255)))
        batch_op.add_column(Column('scheduled_at', DateTime))
        batch_op.add_column(Column('launched_at', DateTime))
        batch_op.add_column(Column('terminated_at', DateTime))
        batch_op.add_column(
            Column('share_network_id', String(length=36),
                   ForeignKey('share_networks.id'), nullable=True))
        batch_op.add_column(
            Column('share_server_id', String(length=36),
                   ForeignKey('share_servers.id'), nullable=True))
        batch_op.add_column(Column('availability_zone', String(length=255)))

    shares_table = utils.load_table('shares', connection)
    share_inst_table = utils.load_table('share_instances', connection)

    # Columns to copy from the instance row onto its parent share.
    copied_columns = ('host', 'status', 'scheduled_at', 'launched_at',
                      'terminated_at', 'share_network_id',
                      'share_server_id', 'availability_zone')

    for share in connection.execute(shares_table.select()):
        instance = connection.execute(share_inst_table.select().where(
            share_inst_table.c.share_id == share.id)).first()

        values = {column: instance[column] for column in copied_columns}
        # pylint: disable=no-value-for-parameter
        op.execute(shares_table.update().where(
            shares_table.c.id == share.id).values(values))

    op.drop_table('share_instances')
예제 #40
0
    def check_upgrade(self, engine, data):
        """Verify the new 'label' column and related network columns.

        Checks existing allocations default to label 'user', inserts an
        'admin'-labeled allocation, and verifies all new columns round-trip.
        """
        na_table = utils.load_table(self.table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual(na.label, 'user')

        # Create admin network allocation
        # NOTE(review): 'na' is the leaked loop variable from above, so
        # this assumes at least one allocation row already exists.
        network_allocations = [
            {'id': self.ids[2],
             'share_server_id': na.share_server_id,
             'ip_address': '3.3.3.3',
             'label': 'admin',
             'network_type': 'vlan',
             'segmentation_id': 1005,
             'ip_version': 4,
             'cidr': '240.0.0.0/16'},
        ]
        engine.execute(na_table.insert(network_allocations))

        # Select admin network allocations
        for na in engine.execute(
                na_table.select().where(na_table.c.label == 'admin')):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual('admin', na.label)
            # The network detail columns must store the inserted values.
            for col_name in ('network_type', 'segmentation_id', 'ip_version',
                             'cidr'):
                self.test_case.assertTrue(hasattr(na, col_name))
                self.test_case.assertEqual(
                    network_allocations[0][col_name], getattr(na, col_name))
def upgrade():
    """Add 'is_auto_deletable' and 'identifier' to share_servers, backfill.

    Existing servers get identifier = their own id and
    is_auto_deletable = True. Failures are logged and re-raised.
    """
    try:
        op.add_column('share_servers', sa.Column(
            'is_auto_deletable', sa.Boolean, default=True))
        op.add_column('share_servers', sa.Column(
            'identifier', sa.String(length=255), default=None))
    except Exception:
        LOG.error("Columns share_servers.is_auto_deletable "
                  "and/or share_servers.identifier not created!")
        raise

    try:
        connection = op.get_bind()
        servers = utils.load_table('share_servers', connection)
        for server in connection.execute(servers.select()):
            # pylint: disable=no-value-for-parameter
            update = servers.update().where(servers.c.id == server.id)
            connection.execute(update.values(
                {"identifier": server.id, "is_auto_deletable": True}))
    except Exception:
        LOG.error(
            "Could not initialize share_servers.is_auto_deletable to True"
            " and share_servers.identifier with the share server ID!")
        raise
 def check_downgrade(self, engine):
     """Ensure the added allocation columns were removed on downgrade."""
     na_table = utils.load_table(self.table_name, engine)
     db_result = engine.execute(na_table.select())
     self.test_case.assertTrue(db_result.rowcount >= len(self.ids))
     removed_columns = ("label", "network_type", "segmentation_id",
                        "ip_version", "cidr")
     for na in db_result:
         for col_name in removed_columns:
             self.test_case.assertFalse(hasattr(na, col_name))
def upgrade():
    """Add and backfill share_servers.is_auto_deletable and .identifier.

    Each existing server's identifier is set to its own id and
    is_auto_deletable to True; errors are logged and re-raised.
    """
    try:
        op.add_column('share_servers',
                      sa.Column('is_auto_deletable', sa.Boolean,
                                default=True))
        op.add_column('share_servers',
                      sa.Column('identifier', sa.String(length=255),
                                default=None))
    except Exception:
        LOG.error("Columns share_servers.is_auto_deletable "
                  "and/or share_servers.identifier not created!")
        raise

    try:
        connection = op.get_bind()
        table = utils.load_table('share_servers', connection)
        for row in connection.execute(table.select()):
            values = {"identifier": row.id, "is_auto_deletable": True}
            # pylint: disable=no-value-for-parameter
            connection.execute(
                table.update().where(table.c.id == row.id).values(values))
    except Exception:
        LOG.error(
            "Could not initialize share_servers.is_auto_deletable to True"
            " and share_servers.identifier with the share server ID!")
        raise
예제 #44
0
 def check_downgrade(self, engine):
     """Check the added columns and the metadata table were removed."""
     el_table = utils.load_table('share_instance_export_locations', engine)
     for location in engine.execute(el_table.select()):
         self.test_case.assertFalse(hasattr(location, 'is_admin_only'))
         self.test_case.assertFalse(hasattr(location, 'uuid'))
     # The metadata table itself must be gone after downgrade.
     self.test_case.assertRaises(sa_exc.NoSuchTableError, utils.load_table,
                                 self.elm_table_name, engine)
    def check_upgrade(self, engine, data):
        """Verify the 'label' column default and the new network columns.

        Existing allocations must carry label 'user'; a new 'admin'
        allocation is inserted and its columns verified.
        """
        na_table = utils.load_table(self.table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual(na.label, 'user')

        # Create admin network allocation
        # NOTE(review): 'na' leaks from the loop above -- assumes setup
        # data created at least one allocation row.
        network_allocations = [
            {'id': self.ids[2],
             'share_server_id': na.share_server_id,
             'ip_address': '3.3.3.3',
             'label': 'admin',
             'network_type': 'vlan',
             'segmentation_id': 1005,
             'ip_version': 4,
             'cidr': '240.0.0.0/16'},
        ]
        engine.execute(na_table.insert(network_allocations))

        # Select admin network allocations
        for na in engine.execute(
                na_table.select().where(na_table.c.label == 'admin')):
            self.test_case.assertTrue(hasattr(na, 'label'))
            self.test_case.assertEqual('admin', na.label)
            # Each network detail column must hold the inserted value.
            for col_name in ('network_type', 'segmentation_id', 'ip_version',
                             'cidr'):
                self.test_case.assertTrue(hasattr(na, col_name))
                self.test_case.assertEqual(
                    network_allocations[0][col_name], getattr(na, col_name))
def upgrade():
    """Add snapshot access tables and shares.mount_snapshot_support.

    Creates 'share_snapshot_access_map',
    'share_snapshot_instance_access_map' and
    'share_snapshot_instance_export_locations', adds a
    'mount_snapshot_support' flag to shares, and initializes it to False
    for all non-deleted shares.
    """
    # Per-snapshot access rules (type + target address).
    op.create_table(
        'share_snapshot_access_map',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.String(36), default='False'),
        sa.Column('share_snapshot_id', sa.String(36),
                  sa.ForeignKey('share_snapshots.id',
                                name='ssam_snapshot_fk')),
        sa.Column('access_type', sa.String(255)),
        sa.Column('access_to', sa.String(255))
    )

    # Join table tracking each rule's application state per snapshot
    # instance.
    op.create_table(
        'share_snapshot_instance_access_map',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.String(36), default='False'),
        sa.Column('share_snapshot_instance_id', sa.String(36),
                  sa.ForeignKey('share_snapshot_instances.id',
                                name='ssiam_snapshot_instance_fk')),
        sa.Column('access_id', sa.String(36),
                  sa.ForeignKey('share_snapshot_access_map.id',
                                name='ssam_access_fk')),
        sa.Column('state', sa.String(255),
                  default=constants.ACCESS_STATE_QUEUED_TO_APPLY)
    )

    # Export locations for mountable snapshot instances.
    op.create_table(
        'share_snapshot_instance_export_locations',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.String(36), default='False'),
        sa.Column('share_snapshot_instance_id', sa.String(36),
                  sa.ForeignKey('share_snapshot_instances.id',
                                name='ssiel_snapshot_instance_fk')),
        sa.Column('path', sa.String(2000)),
        sa.Column('is_admin_only', sa.Boolean, default=False, nullable=False)
    )

    op.add_column('shares',
                  sa.Column('mount_snapshot_support', sa.Boolean,
                            default=False))

    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)

    # Backfill existing (non-deleted) shares with the new flag disabled.
    # pylint: disable=no-value-for-parameter
    op.execute(
        shares_table.update().where(
            shares_table.c.deleted == 'False').values({
                'mount_snapshot_support': False,
            })
    )
    def check_upgrade(self, engine, data):
        """Check the 'label' default, then insert/read an admin allocation."""
        na_table = utils.load_table(self.table_name, engine)
        for na in engine.execute(na_table.select()):
            self.test_case.assertTrue(hasattr(na, "label"))
            self.test_case.assertEqual(na.label, "user")

        # Create admin network allocation
        network_allocations = [
            {
                "id": self.ids[2],
                "share_server_id": na.share_server_id,
                "ip_address": "3.3.3.3",
                "label": "admin",
                "network_type": "vlan",
                "segmentation_id": 1005,
                "ip_version": 4,
                "cidr": "240.0.0.0/16",
            }
        ]
        engine.execute(na_table.insert(network_allocations))

        # Select admin network allocations
        admin_query = na_table.select().where(na_table.c.label == "admin")
        for na in engine.execute(admin_query):
            self.test_case.assertTrue(hasattr(na, "label"))
            self.test_case.assertEqual("admin", na.label)
            for col_name in ("network_type", "segmentation_id",
                             "ip_version", "cidr"):
                self.test_case.assertTrue(hasattr(na, col_name))
                self.test_case.assertEqual(
                    network_allocations[0][col_name], getattr(na, col_name))
def upgrade():
    """Create snapshot access/export tables and add a mount-support flag.

    Adds 'share_snapshot_access_map', 'share_snapshot_instance_access_map'
    and 'share_snapshot_instance_export_locations', plus a
    'mount_snapshot_support' boolean on shares which is backfilled to
    False for all non-deleted rows.
    """
    # Per-snapshot access rules (type + target address).
    op.create_table(
        'share_snapshot_access_map',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.String(36), default='False'),
        sa.Column('share_snapshot_id', sa.String(36),
                  sa.ForeignKey('share_snapshots.id',
                                name='ssam_snapshot_fk')),
        sa.Column('access_type', sa.String(255)),
        sa.Column('access_to', sa.String(255)))

    # Per-snapshot-instance application state of each access rule.
    op.create_table(
        'share_snapshot_instance_access_map',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.String(36), default='False'),
        sa.Column(
            'share_snapshot_instance_id', sa.String(36),
            sa.ForeignKey('share_snapshot_instances.id',
                          name='ssiam_snapshot_instance_fk')),
        sa.Column(
            'access_id', sa.String(36),
            sa.ForeignKey('share_snapshot_access_map.id',
                          name='ssam_access_fk')),
        sa.Column('state',
                  sa.String(255),
                  default=constants.ACCESS_STATE_QUEUED_TO_APPLY))

    # Export locations for mountable snapshot instances.
    op.create_table(
        'share_snapshot_instance_export_locations',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.String(36), default='False'),
        sa.Column(
            'share_snapshot_instance_id', sa.String(36),
            sa.ForeignKey('share_snapshot_instances.id',
                          name='ssiel_snapshot_instance_fk')),
        sa.Column('path', sa.String(2000)),
        sa.Column('is_admin_only', sa.Boolean, default=False, nullable=False))

    op.add_column(
        'shares', sa.Column('mount_snapshot_support',
                            sa.Boolean,
                            default=False))

    connection = op.get_bind()
    shares_table = utils.load_table('shares', connection)

    # Backfill the new flag to False for existing non-deleted shares.
    op.execute(
        shares_table.update().where(shares_table.c.deleted == 'False').values({
            'mount_snapshot_support':
            False,
        }))
    def _check_length_for_table_columns(self, table_name, engine, cols,
                                        length):
        """Assert each named column of *table_name* has the given length."""
        table = utils.load_table(table_name, engine)
        db_result = engine.execute(table.select())
        self.test_case.assertTrue(db_result.rowcount > 0)

        for col in cols:
            column_type = table.columns.get(col).type
            self.test_case.assertEqual(column_type.length, length)
 def check_downgrade(self, engine):
     """Check the 'gateway' column was dropped from both tables."""
     checks = ((self.na_table_name, self.na_ids),
               (self.sn_table_name, self.sn_ids))
     for table_name, ids in checks:
         table = utils.load_table(table_name, engine)
         db_result = engine.execute(table.select())
         self.test_case.assertTrue(db_result.rowcount >= len(ids))
         for record in db_result:
             self.test_case.assertFalse(hasattr(record, "gateway"))
def create_snapshot_instances_table(connection):
    """Create 'share_snapshot_instances' and migrate snapshot data into it.

    Each snapshot row is copied into the new table and linked to the
    first share instance of its parent share; 'status' and 'progress'
    are then dropped from 'share_snapshots'.
    """
    # Create 'share_snapshot_instances' table
    snapshot_instances_table = op.create_table(
        'share_snapshot_instances',
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', String(length=36), default='False'),
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('snapshot_id', String(length=36),
               ForeignKey('share_snapshots.id', name="ssi_snapshot_fk")),
        Column('share_instance_id', String(length=36),
               ForeignKey('share_instances.id', name="ssi_share_instance_fk")),
        Column('status', String(length=255)),
        Column('progress', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    # Migrate data from share_snapshots to share_snapshot_instances
    snapshot_instances = []
    snapshot_table = utils.load_table('share_snapshots', connection)
    share_instances_table = utils.load_table('share_instances', connection)

    for snapshot in connection.execute(snapshot_table.select()):
        share_instances_rows = connection.execute(
            share_instances_table.select().where(
                share_instances_table.c.share_id == snapshot.share_id
            )
        )
        snapshot_instances.append({
            'created_at': snapshot.created_at,
            'updated_at': snapshot.updated_at,
            'deleted_at': snapshot.deleted_at,
            'deleted': snapshot.deleted,
            'id': snapshot.id,
            'snapshot_id': snapshot.id,
            'status': snapshot.status,
            'progress': snapshot.progress,
            # BUG FIX: the table column (defined above) is
            # 'share_instance_id'; the previous 'snapshot_instance_id' key
            # matched no column and left the share-instance link unset.
            'share_instance_id': share_instances_rows.first().id,
        })
    op.bulk_insert(snapshot_instances_table, snapshot_instances)

    # Remove columns moved to 'share_snapshot_instances' table
    with op.batch_alter_table("share_snapshots") as batch_op:
        batch_op.drop_column('status')
        batch_op.drop_column('progress')
예제 #52
0
def _transform_case(table_name, make_upper):
    """Rewrite every row's 'status' in *table_name* to upper or lower case.

    :param table_name: name of the table whose 'status' column is rewritten.
    :param make_upper: when True use SQL UPPER(), otherwise LOWER().
    """
    bind = op.get_bind()
    table = utils.load_table(table_name, bind)
    transform = sa.func.upper if make_upper else sa.func.lower

    # Issue one UPDATE per row so each row's transformed status is computed
    # from the value read above.
    for record in bind.execute(table.select()):
        statement = table.update().where(  # pylint: disable=no-value-for-parameter
            table.c.id == record.id).values({'status': transform(record.status)})
        op.execute(statement)
    def check_downgrade(self, engine):
        """Verify each instance-access row kept the expected 'state' value."""
        rules_table = utils.load_table(
            "share_instance_access_map", engine)

        # Expected state keyed by the share_instance_id seeded during setup.
        expected_states = {
            "1": "active",
            "2": "error",
            "3": "error",
            "4": None,
        }

        for rule in engine.execute(rules_table.select()):
            expected = expected_states[rule["share_instance_id"]]
            self.test_case.assertEqual(expected, rule["state"])
 def check_downgrade(self, engine):
     """Check no record in either table still exposes a 'gateway' attribute."""
     checks = ((self.na_table_name, self.na_ids),
               (self.sn_table_name, self.sn_ids))
     for name, expected_ids in checks:
         table = utils.load_table(name, engine)
         result = engine.execute(table.select())
         # At least the seeded rows must still be present after downgrade.
         self.test_case.assertTrue(result.rowcount >= len(expected_ids))
         for row in result:
             self.test_case.assertFalse(hasattr(row, 'gateway'))
# Example #55 (score: 0)
 def check_downgrade(self, engine):
     """Verify 'provider_location' was dropped and the seeded ids survived."""
     table = utils.load_table(self.table_name, engine)
     rows = engine.execute(table.select())
     self.test_case.assertTrue(rows.rowcount > 0)
     for row in rows:
         # The column must be gone, but the identifying values preserved.
         self.test_case.assertFalse(hasattr(row, 'provider_location'))
         self.test_case.assertEqual('new_snapshot_instance_id', row.id)
         self.test_case.assertEqual('new_snapshot_id', row.snapshot_id)
 def check_downgrade(self, engine):
     """Ensure the downgrade removed 'provider_location' without losing rows."""
     snapshot_table = utils.load_table(self.table_name, engine)
     result = engine.execute(snapshot_table.select())
     self.test_case.assertTrue(result.rowcount > 0)
     for record in result:
         self.test_case.assertFalse(hasattr(record, 'provider_location'))
         # Seeded identifiers must be intact after the schema change.
         self.test_case.assertEqual('new_snapshot_instance_id', record.id)
         self.test_case.assertEqual('new_snapshot_id', record.snapshot_id)
 def check_downgrade(self, engine):
     """Verify the network-allocation columns were dropped by the downgrade."""
     table = utils.load_table(self.table_name, engine)
     result = engine.execute(table.select())
     # The seeded allocations must all still be present.
     self.test_case.assertTrue(result.rowcount >= len(self.ids))
     dropped_columns = ('label', 'network_type', 'segmentation_id',
                        'ip_version', 'cidr')
     for record in result:
         for column in dropped_columns:
             self.test_case.assertFalse(hasattr(record, column))