Example #1
    def test_access_rules_deleted_if_share_deleted(self, version):
        if (utils.is_microversion_lt(version, '2.13')
                and CONF.share.enable_cephx_rules_for_protocols):
            msg = ("API version %s does not support cephx access type, need "
                   "version >= 2.13." % version)
            raise self.skipException(msg)
        if utils.is_microversion_le(version, '2.9'):
            client = self.shares_client
        else:
            client = self.shares_v2_client

        # create share
        share = self.create_share(share_type_id=self.share_type_id)

        # create rule
        rule = self.allow_access(share["id"],
                                 client=client,
                                 access_type=self.access_type,
                                 access_to=self.access_to,
                                 version=version,
                                 cleanup=False)

        # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
        if utils.is_microversion_le(version, "2.27"):
            self.assertEqual("new", rule['state'])
        else:
            self.assertEqual("queued_to_apply", rule['state'])

        # delete share
        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.delete_share(share['id'])
            self.shares_client.wait_for_resource_deletion(share_id=share['id'])
        else:
            self.shares_v2_client.delete_share(share['id'], version=version)
            self.shares_v2_client.wait_for_resource_deletion(
                share_id=share['id'], version=version)

        # verify absence of rules for nonexistent share id
        if utils.is_microversion_eq(version, '1.0'):
            self.assertRaises(lib_exc.NotFound,
                              self.shares_client.list_access_rules,
                              share['id'])
        elif utils.is_microversion_lt(version, '2.45'):
            self.assertRaises(lib_exc.NotFound,
                              self.shares_v2_client.list_access_rules,
                              share['id'], version)
        else:
            self.assertRaises(lib_exc.BadRequest,
                              self.shares_v2_client.list_access_rules,
                              share['id'], version)
Example #2
    def test_read_write_two_vms(self):
        """Boots two VMs and writes/reads data on them."""
        test_data = "Some test data to write"

        # Boot two VMs and create share
        instance1 = self.boot_instance(wait_until="BUILD")
        instance2 = self.boot_instance(wait_until="BUILD")
        self.create_share()
        instance1 = self.wait_for_active_instance(instance1["id"])
        instance2 = self.wait_for_active_instance(instance2["id"])

        # Write data to first VM
        ssh_client_inst1 = self.init_ssh(instance1)
        self.provide_access_to_auxiliary_instance(instance1)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        self.mount_share(locations[0], ssh_client_inst1)
        self.addCleanup(self.umount_share,
                        ssh_client_inst1)
        self.write_data(test_data, ssh_client_inst1)

        # Read from second VM
        ssh_client_inst2 = self.init_ssh(instance2)
        self.provide_access_to_auxiliary_instance(instance2)
        self.mount_share(locations[0], ssh_client_inst2)
        self.addCleanup(self.umount_share,
                        ssh_client_inst2)
        data = self.read_data(ssh_client_inst2)
        self.assertEqual(test_data, data)
Example #3
    def _list_shares_with_detail(self, version):

        # list shares
        shares = self.shares_v2_client.list_shares_with_detail(
            version=six.text_type(version))

        # verify keys
        keys = [
            "status", "description", "links", "availability_zone",
            "created_at", "project_id", "volume_type", "share_proto", "name",
            "snapshot_id", "id", "size", "share_network_id", "metadata",
            "host", "snapshot_id", "is_public", "share_type",
        ]
        if utils.is_microversion_lt(version, '2.9'):
            keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.2'):
            keys.append("snapshot_support")
        if utils.is_microversion_ge(version, '2.4'):
            keys.extend(["consistency_group_id",
                         "source_cgsnapshot_member_id"])
        if utils.is_microversion_ge(version, '2.6'):
            keys.append("share_type_name")
        if utils.is_microversion_ge(version, '2.10'):
            keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            keys.append("replication_type")
        if utils.is_microversion_ge(version, '2.16'):
            keys.append("user_id")
        [self.assertIn(key, sh.keys()) for sh in shares for key in keys]

        # our shares are in the list and have no duplicates
        for share in self.shares:
            gen = [sid["id"] for sid in shares if sid["id"] in share["id"]]
            msg = "expected id lists %s times in share list" % (len(gen))
            self.assertEqual(1, len(gen), msg)
Example #4
    def _get_share_instance(self, version):
        """Test that we get the proper keys back for the instance."""
        share_instances = self.shares_v2_client.get_instances_of_share(
            self.share["id"], version=version)

        si = self.shares_v2_client.get_share_instance(
            share_instances[0]["id"], version=version)

        expected_keys = [
            "host",
            "share_id",
            "id",
            "share_network_id",
            "status",
            "availability_zone",
            "share_server_id",
            "created_at",
        ]
        if utils.is_microversion_lt(version, "2.9"):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, "2.10"):
            expected_keys.append("access_rules_status")
        expected_keys = sorted(expected_keys)
        actual_keys = sorted(si.keys())
        self.assertEqual(
            expected_keys,
            actual_keys,
            "Share instance %s returned incorrect keys; "
            "expected %s, got %s." % (si["id"], expected_keys, actual_keys),
        )
Example #5
    def test_read_write_two_vms(self):
        """Boots two VMs and writes/reads data on them."""
        test_data = "Some test data to write"
        self.security_group = self._create_security_group()
        self.create_share()

        # boot first VM and write data
        instance1 = self.boot_instance()
        self.allow_access_ip(self.share['id'], instance=instance1,
                             cleanup=False)
        ssh_client_inst1 = self.init_ssh(instance1)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        self.mount_share(locations[0], ssh_client_inst1)
        self.addCleanup(self.umount_share,
                        ssh_client_inst1)
        self.write_data(test_data, ssh_client_inst1)

        # boot second VM and read
        instance2 = self.boot_instance()
        self.allow_access_ip(self.share['id'], instance=instance2)
        ssh_client_inst2 = self.init_ssh(instance2)
        self.mount_share(locations[0], ssh_client_inst2)
        self.addCleanup(self.umount_share,
                        ssh_client_inst2)
        data = self.read_data(ssh_client_inst2)
        self.assertEqual(test_data, data)
Example #6
    def test_read_write_two_vms(self):
        """Boots two VMs and writes/reads data on them."""
        test_data = "Some test data to write"

        # Boot two VMs and create share
        instance1 = self.boot_instance(wait_until="BUILD")
        instance2 = self.boot_instance(wait_until="BUILD")
        self.create_share()
        instance1 = self.wait_for_active_instance(instance1["id"])
        instance2 = self.wait_for_active_instance(instance2["id"])

        # Write data to first VM
        ssh_client_inst1 = self.init_ssh(instance1)
        self.provide_access_to_auxiliary_instance(instance1)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        self.mount_share(locations[0], ssh_client_inst1)
        self.addCleanup(self.umount_share, ssh_client_inst1)
        self.write_data(test_data, ssh_client_inst1)

        # Read from second VM
        ssh_client_inst2 = self.init_ssh(instance2)
        self.provide_access_to_auxiliary_instance(instance2)
        self.mount_share(locations[0], ssh_client_inst2)
        self.addCleanup(self.umount_share, ssh_client_inst2)
        data = self.read_data(ssh_client_inst2)
        self.assertEqual(test_data, data)
Example #7
    def _get_share_instance(self, version):
        """Test that we get the proper keys back for the instance."""
        share_instances = self.shares_v2_client.get_instances_of_share(
            self.share['id'], version=version,
        )

        si = self.shares_v2_client.get_share_instance(
            share_instances[0]['id'], version=version)

        expected_keys = [
            'host', 'share_id', 'id', 'share_network_id', 'status',
            'availability_zone', 'share_server_id', 'created_at',
        ]
        if utils.is_microversion_lt(version, '2.9'):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.10'):
            expected_keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            expected_keys.append("replica_state")
        if utils.is_microversion_ge(version, '2.22'):
            expected_keys.append("share_type_id")
        if utils.is_microversion_ge(version, '2.30'):
            expected_keys.append("cast_rules_to_readonly")
        expected_keys = sorted(expected_keys)
        actual_keys = sorted(si.keys())
        self.assertEqual(expected_keys, actual_keys,
                         'Share instance %s returned incorrect keys; '
                         'expected %s, got %s.' % (
                             si['id'], expected_keys, actual_keys))
Example #8
    def _validate_migration_successful(self, dest_pool, share,
                                       old_exports, version, notify=True):
        if utils.is_microversion_lt(version, '2.9'):
            new_exports = share['export_locations']
            self.assertNotEmpty(new_exports)
        else:
            new_exports = self.shares_v2_client.list_share_export_locations(
                share['id'], version='2.9')
            self.assertNotEmpty(new_exports)
            new_exports = [x['path'] for x in new_exports if
                           x['is_admin_only'] is False]
            self.assertNotEmpty(new_exports)

        # Share migrated
        if notify:
            self.assertEqual(dest_pool, share['host'])
            for export in old_exports:
                self.assertFalse(export in new_exports)
            self.assertEqual('migration_success', share['task_state'])
        # Share not migrated yet
        else:
            self.assertNotEqual(dest_pool, share['host'])
            for export in old_exports:
                self.assertTrue(export in new_exports)
            self.assertEqual('data_copying_completed', share['task_state'])
Example #9
    def _list_shares_with_detail(self, version):

        # list shares
        shares = self.shares_v2_client.list_shares_with_detail(
            version=six.text_type(version))

        # verify keys
        keys = [
            "status", "description", "links", "availability_zone",
            "created_at", "project_id", "volume_type", "share_proto", "name",
            "snapshot_id", "id", "size", "share_network_id", "metadata",
            "host", "snapshot_id", "is_public", "share_type",
        ]
        if utils.is_microversion_lt(version, '2.9'):
            keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.2'):
            keys.append("snapshot_support")
        if utils.is_microversion_ge(version, '2.4'):
            keys.extend(["consistency_group_id",
                         "source_cgsnapshot_member_id"])
        if utils.is_microversion_ge(version, '2.6'):
            keys.append("share_type_name")

        [self.assertIn(key, sh.keys()) for sh in shares for key in keys]

        # our shares are in the list and have no duplicates
        for share in self.shares:
            gen = [sid["id"] for sid in shares if sid["id"] in share["id"]]
            msg = "expected id lists %s times in share list" % (len(gen))
            self.assertEqual(1, len(gen), msg)
Example #10
    def test_create_duplicate_single_host_rules(self, access_to):
        """Test rules for individual clients with and without max-prefix."""
        if ':' in access_to and utils.is_microversion_lt(
                CONF.share.max_api_microversion, '2.38'):
            reason = ("Skipped. IPv6 rules are accepted from and beyond "
                      "API version 2.38, the configured maximum API version "
                      "is %s" % CONF.share.max_api_microversion)
            raise self.skipException(reason)

        rule = self.shares_v2_client.create_access_rule(
            self.share["id"], "ip", access_to)
        self.addCleanup(self.shares_v2_client.delete_access_rule,
                        self.share["id"], rule['id'])
        self.shares_v2_client.wait_for_share_status(
            self.share["id"], "active", status_attr='access_rules_status')

        self.assertRaises(lib_exc.BadRequest,
                          self.shares_v2_client.create_access_rule,
                          self.share["id"], "ip", access_to)

        if '/' in access_to:
            access_to = access_to.split("/")[0]
        else:
            access_to = ('%s/32' %
                         access_to if '.' in access_to else '%s/128' %
                         access_to)

        self.assertRaises(lib_exc.BadRequest,
                          self.shares_v2_client.create_access_rule,
                          self.share["id"], "ip", access_to)
Example #11
    def _get_share_instance(self, version):
        """Test that we get the proper keys back for the instance."""
        share_instances = self.shares_v2_client.get_instances_of_share(
            self.share['id'],
            version=version,
        )

        si = self.shares_v2_client.get_share_instance(share_instances[0]['id'],
                                                      version=version)

        expected_keys = [
            'host',
            'share_id',
            'id',
            'share_network_id',
            'status',
            'availability_zone',
            'share_server_id',
            'created_at',
        ]
        if utils.is_microversion_lt(version, '2.9'):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.10'):
            expected_keys.append("access_rules_status")
        expected_keys = sorted(expected_keys)
        actual_keys = sorted(si.keys())
        self.assertEqual(
            expected_keys, actual_keys,
            'Share instance %s returned incorrect keys; '
            'expected %s, got %s.' % (si['id'], expected_keys, actual_keys))
Example #12
    def test_read_write_two_vms(self):
        """Boots two VMs and writes/reads data on them."""
        test_data = "Some test data to write"
        self.security_group = self._create_security_group()
        self.create_share()

        # boot first VM and write data
        instance1 = self.boot_instance()
        self.allow_access_ip(self.share['id'],
                             instance=instance1,
                             cleanup=False)
        ssh_client_inst1 = self.init_ssh(instance1)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        self.mount_share(locations[0], ssh_client_inst1)
        self.addCleanup(self.umount_share, ssh_client_inst1)
        self.write_data(test_data, ssh_client_inst1)

        # boot second VM and read
        instance2 = self.boot_instance()
        self.allow_access_ip(self.share['id'], instance=instance2)
        ssh_client_inst2 = self.init_ssh(instance2)
        self.mount_share(locations[0], ssh_client_inst2)
        self.addCleanup(self.umount_share, ssh_client_inst2)
        data = self.read_data(ssh_client_inst2)
        self.assertEqual(test_data, data)
Example #13
    def _get_share(self, version):

        # get share
        share = self.shares_v2_client.get_share(self.shares[0]['id'],
                                                version=six.text_type(version))

        # verify keys
        expected_keys = [
            "status",
            "description",
            "links",
            "availability_zone",
            "created_at",
            "project_id",
            "volume_type",
            "share_proto",
            "name",
            "snapshot_id",
            "id",
            "size",
            "share_network_id",
            "metadata",
            "snapshot_id",
            "is_public",
        ]
        if utils.is_microversion_lt(version, '2.9'):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.2'):
            expected_keys.append("snapshot_support")
        if utils.is_microversion_ge(version, '2.5'):
            expected_keys.append("share_type_name")
        if utils.is_microversion_ge(version, '2.10'):
            expected_keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            expected_keys.append("replication_type")
        if utils.is_microversion_ge(version, '2.16'):
            expected_keys.append("user_id")
        if utils.is_microversion_ge(version, '2.24'):
            expected_keys.append("create_share_from_snapshot_support")
        if utils.is_microversion_ge(version,
                                    constants.REVERT_TO_SNAPSHOT_MICROVERSION):
            expected_keys.append("revert_to_snapshot_support")
        actual_keys = list(share.keys())
        [self.assertIn(key, actual_keys) for key in expected_keys]

        # verify values
        msg = "Expected name: '%s', actual name: '%s'" % (self.share_name,
                                                          share["name"])
        self.assertEqual(self.share_name, six.text_type(share["name"]), msg)

        msg = ("Expected description: '%s', "
               "actual description: '%s'" %
               (self.share_desc, share["description"]))
        self.assertEqual(self.share_desc, six.text_type(share["description"]),
                         msg)

        msg = "Expected size: '%s', actual size: '%s'" % (
            CONF.share.share_size, share["size"])
        self.assertEqual(CONF.share.share_size, int(share["size"]), msg)
Example #14
    def test_list_access_rules(self, version):
        if (utils.is_microversion_lt(version, '2.13') and
                CONF.share.enable_cephx_rules_for_protocols):
            msg = ("API version %s does not support cephx access type, "
                   "need version greater than 2.13." % version)
            raise self.skipException(msg)

        # create rule
        if utils.is_microversion_eq(version, '1.0'):
            rule = self.shares_client.create_access_rule(
                self.share["id"], self.access_type, self.access_to)
        else:
            rule = self.shares_v2_client.create_access_rule(
                self.share["id"], self.access_type, self.access_to,
                version=version)

        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.wait_for_access_rule_status(
                self.share["id"], rule["id"], "active")
        elif utils.is_microversion_eq(version, '2.9'):
            self.shares_v2_client.wait_for_access_rule_status(
                self.share["id"], rule["id"], "active")
        else:
            self.shares_v2_client.wait_for_share_status(
                self.share["id"], "active", status_attr='access_rules_status',
                version=version)

        # list rules
        if utils.is_microversion_eq(version, '1.0'):
            rules = self.shares_client.list_access_rules(self.share["id"])
        else:
            rules = self.shares_v2_client.list_access_rules(self.share["id"],
                                                            version=version)

        # verify keys
        for key in ("id", "access_type", "access_to", "access_level"):
            [self.assertIn(key, r.keys()) for r in rules]
        for key in ('deleted', 'deleted_at', 'instance_mappings'):
            [self.assertNotIn(key, r.keys()) for r in rules]

        # verify values
        self.assertEqual(self.access_type, rules[0]["access_type"])
        self.assertEqual(self.access_to, rules[0]["access_to"])
        self.assertEqual('rw', rules[0]["access_level"])

        # our rule id is in the list and has no duplicates
        gen = [r["id"] for r in rules if r["id"] in rule["id"]]
        msg = "expected id lists %s times in rule list" % (len(gen))
        self.assertEqual(1, len(gen), msg)

        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
            self.shares_client.wait_for_resource_deletion(
                rule_id=rule["id"], share_id=self.share['id'])
        else:
            self.shares_v2_client.delete_access_rule(
                self.share["id"], rule["id"], version=version)
            self.shares_v2_client.wait_for_resource_deletion(
                rule_id=rule["id"], share_id=self.share['id'], version=version)
Example #15
    def get_share_export_locations(self, share):
        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                share['id'])
            locations = [x['path'] for x in exports]
        return locations
Example #16
def skip_if_cephx_access_type_not_supported_by_client(self, client):
    if client == 'shares_client':
        version = '1.0'
    else:
        version = LATEST_MICROVERSION
    if (CONF.share.enable_cephx_rules_for_protocols
            and utils.is_microversion_lt(version, '2.13')):
        msg = ("API version %s does not support cephx access type, need "
               "version >= 2.13." % version)
        raise self.skipException(msg)
Example #17
def skip_if_cephx_access_type_not_supported_by_client(self, client):
    if client == 'shares_client':
        version = '1.0'
    else:
        version = LATEST_MICROVERSION
    if (CONF.share.enable_cephx_rules_for_protocols and
            utils.is_microversion_lt(version, '2.13')):
        msg = ("API version %s does not support cephx access type, "
               "need version greater than 2.13." % version)
        raise self.skipException(msg)
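All of the examples on this page branch on the same comparison helper. As a rough orientation only, here is a minimal sketch of what a "lower than" microversion check such as utils.is_microversion_lt could look like, assuming microversions are plain "MAJOR.MINOR" strings; this is an illustrative sketch, not the actual manila-tempest-plugin implementation.

# Hypothetical sketch only: compare "MAJOR.MINOR" microversion strings
# numerically, so that "2.9" is treated as lower than "2.13".
def is_microversion_lt(requested, reference):
    requested_major, requested_minor = map(int, requested.split('.'))
    reference_major, reference_minor = map(int, reference.split('.'))
    return ((requested_major, requested_minor) <
            (reference_major, reference_minor))

# A plain string comparison would get this wrong ("2.13" sorts before "2.9"
# lexically), which is why the sketch compares tuples of integers.
assert is_microversion_lt("2.9", "2.13")
assert not is_microversion_lt("2.38", "2.13")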
Example #18
    def test_access_rules_deleted_if_share_deleted(self, version):
        if (utils.is_microversion_lt(version, '2.13') and
                CONF.share.enable_cephx_rules_for_protocols):
            msg = ("API version %s does not support cephx access type, need "
                   "version >= 2.13." % version)
            raise self.skipException(msg)

        # create share
        share = self.create_share()

        # create rule
        if utils.is_microversion_eq(version, '1.0'):
            rule = self.shares_client.create_access_rule(
                share["id"], self.access_type, self.access_to)
        else:
            rule = self.shares_v2_client.create_access_rule(
                share["id"], self.access_type, self.access_to,
                version=version)

        # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
        if utils.is_microversion_le(version, "2.27"):
            self.assertEqual("new", rule['state'])
        else:
            self.assertEqual("queued_to_apply", rule['state'])

        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.wait_for_access_rule_status(
                share["id"], rule["id"], "active")
        elif utils.is_microversion_eq(version, '2.9'):
            self.shares_v2_client.wait_for_access_rule_status(
                share["id"], rule["id"], "active")
        else:
            self.shares_v2_client.wait_for_share_status(
                share["id"], "active", status_attr='access_rules_status',
                version=version)

        # delete share
        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.delete_share(share['id'])
            self.shares_client.wait_for_resource_deletion(share_id=share['id'])
        else:
            self.shares_v2_client.delete_share(share['id'], version=version)
            self.shares_v2_client.wait_for_resource_deletion(
                share_id=share['id'], version=version)

        # verify absence of rules for nonexistent share id
        if utils.is_microversion_eq(version, '1.0'):
            self.assertRaises(lib_exc.NotFound,
                              self.shares_client.list_access_rules,
                              share['id'])
        else:
            self.assertRaises(lib_exc.NotFound,
                              self.shares_v2_client.list_access_rules,
                              share['id'], version)
Example #19
    def test_access_rules_deleted_if_share_deleted(self, version):
        if (utils.is_microversion_lt(version, '2.13')
                and CONF.share.enable_cephx_rules_for_protocols):
            msg = ("API version %s does not support cephx access type, "
                   "need version greater than 2.13." % version)
            raise self.skipException(msg)

        # create share
        share = self.create_share()

        # create rule
        if utils.is_microversion_eq(version, '1.0'):
            rule = self.shares_client.create_access_rule(
                share["id"], self.access_type, self.access_to)
        else:
            rule = self.shares_v2_client.create_access_rule(share["id"],
                                                            self.access_type,
                                                            self.access_to,
                                                            version=version)

        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.wait_for_access_rule_status(
                share["id"], rule["id"], "active")
        elif utils.is_microversion_eq(version, '2.9'):
            self.shares_v2_client.wait_for_access_rule_status(
                share["id"], rule["id"], "active")
        else:
            self.shares_v2_client.wait_for_share_status(
                share["id"],
                "active",
                status_attr='access_rules_status',
                version=version)

        # delete share
        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.delete_share(share['id'])
            self.shares_client.wait_for_resource_deletion(share_id=share['id'])
        else:
            self.shares_v2_client.delete_share(share['id'], version=version)
            self.shares_v2_client.wait_for_resource_deletion(
                share_id=share['id'], version=version)

        # verify absence of rules for nonexistent share id
        if utils.is_microversion_eq(version, '1.0'):
            self.assertRaises(lib_exc.NotFound,
                              self.shares_client.list_access_rules,
                              share['id'])
        else:
            self.assertRaises(lib_exc.NotFound,
                              self.shares_v2_client.list_access_rules,
                              share['id'], version)
Example #20
    def _get_share(self, version):

        # get share
        share = self.shares_v2_client.get_share(
            self.shares[0]['id'], version=six.text_type(version))

        # verify keys
        expected_keys = [
            "status", "description", "links", "availability_zone",
            "created_at", "project_id", "volume_type", "share_proto", "name",
            "snapshot_id", "id", "size", "share_network_id", "metadata",
            "snapshot_id", "is_public",
        ]
        if utils.is_microversion_lt(version, '2.9'):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.2'):
            expected_keys.append("snapshot_support")
        if utils.is_microversion_ge(version, '2.5'):
            expected_keys.append("share_type_name")
        if utils.is_microversion_ge(version, '2.10'):
            expected_keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            expected_keys.append("replication_type")
        if utils.is_microversion_ge(version, '2.16'):
            expected_keys.append("user_id")
        if utils.is_microversion_ge(version, '2.24'):
            expected_keys.append("create_share_from_snapshot_support")
        if utils.is_microversion_ge(version,
                                    constants.REVERT_TO_SNAPSHOT_MICROVERSION):
            expected_keys.append("revert_to_snapshot_support")
        actual_keys = list(share.keys())
        [self.assertIn(key, actual_keys) for key in expected_keys]

        # verify values
        msg = "Expected name: '%s', actual name: '%s'" % (self.share_name,
                                                          share["name"])
        self.assertEqual(self.share_name, six.text_type(share["name"]), msg)

        msg = "Expected description: '%s', "\
              "actual description: '%s'" % (self.share_desc,
                                            share["description"])
        self.assertEqual(
            self.share_desc, six.text_type(share["description"]), msg)

        msg = "Expected size: '%s', actual size: '%s'" % (
            CONF.share.share_size, share["size"])
        self.assertEqual(CONF.share.share_size, int(share["size"]), msg)
Example #21
    def test_mount_share_one_vm(self):
        instance = self.boot_instance(wait_until="BUILD")
        self.create_share()
        instance = self.wait_for_active_instance(instance["id"])
        ssh_client = self.init_ssh(instance)

        self.provide_access_to_auxiliary_instance(instance)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        for location in locations:
            self.mount_share(location, ssh_client)
            self.umount_share(ssh_client)
Example #22
    def _get_user_export_location(self, share=None, snapshot=None):
        user_export_location = None
        if share:
            if utils.is_microversion_lt(CONF.share.max_api_microversion,
                                        "2.9"):
                user_export_location = share['export_locations'][0]
            else:
                exports = self.shares_v2_client.list_share_export_locations(
                    share['id'])
                locations = [x['path'] for x in exports]
                user_export_location = locations[0]
        elif snapshot:
            exports = (self.shares_v2_client.list_snapshot_export_locations(
                snapshot['id']))
            locations = [x['path'] for x in exports]
            user_export_location = locations[0]
        self.assertIsNotNone(user_export_location)
        return user_export_location
Example #23
    def _get_user_export_location(self, share=None, snapshot=None):
        user_export_location = None
        if share:
            if utils.is_microversion_lt(
                    CONF.share.max_api_microversion, "2.9"):
                user_export_location = share['export_locations'][0]
            else:
                exports = self.shares_v2_client.list_share_export_locations(
                    share['id'])
                locations = [x['path'] for x in exports]
                user_export_location = locations[0]
        elif snapshot:
            exports = (self.shares_v2_client.
                       list_snapshot_export_locations(snapshot['id']))
            locations = [x['path'] for x in exports]
            user_export_location = locations[0]
        self.assertIsNotNone(user_export_location)
        return user_export_location
Example #24
    def test_mount_share_one_vm(self):
        instance = self.boot_instance(wait_until="BUILD")
        self.create_share()
        instance = self.wait_for_active_instance(instance["id"])
        ssh_client = self.init_ssh(instance)

        self.provide_access_to_auxiliary_instance(instance)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        for location in locations:
            self.mount_share(location, ssh_client)
            self.umount_share(ssh_client)
Example #25
    def _test_duplicate_rules(self, access_to):
        if ':' in access_to and utils.is_microversion_lt(
                CONF.share.max_api_microversion, '2.38'):
            reason = ("Skipped. IPv6 rules are accepted from and beyond "
                      "API version 2.38, the configured maximum API version "
                      "is %s" % CONF.share.max_api_microversion)
            raise self.skipException(reason)

        # test data
        access_type = "ip"

        # create rule
        rule = self.shares_v2_client.create_snapshot_access_rule(
            self.snap['id'], access_type, access_to)

        self.shares_v2_client.wait_for_snapshot_access_rule_status(
            self.snap['id'], rule['id'])

        # try to create a duplicate of the rule
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_v2_client.create_snapshot_access_rule,
                          self.snap["id"], access_type, access_to)

        # try alternate notation
        if '/' in access_to:
            access_to = access_to.split("/")[0]
        else:
            access_to = ('%s/32' %
                         access_to if '.' in access_to else '%s/128' %
                         access_to)
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_v2_client.create_snapshot_access_rule,
                          self.snap["id"], access_type, access_to)

        # delete rule and wait for deletion
        self.shares_v2_client.delete_snapshot_access_rule(
            self.snap['id'], rule['id'])
        self.shares_v2_client.wait_for_snapshot_access_rule_deletion(
            self.snap['id'], rule['id'])

        self.assertRaises(lib_exc.NotFound,
                          self.shares_v2_client.delete_snapshot_access_rule,
                          self.snap['id'], rule['id'])
Example #26
    def test_mount_share_one_vm(self):
        self.security_group = self._create_security_group()
        self.create_share()
        instance = self.boot_instance()
        self.allow_access_ip(self.share['id'], instance=instance,
                             cleanup=False)
        ssh_client = self.init_ssh(instance)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        for location in locations:
            self.mount_share(location, ssh_client)
            self.umount_share(ssh_client)
        self.servers_client.delete_server(instance['id'])
Example #27
    def test_mount_share_one_vm(self):
        self.security_group = self._create_security_group()
        self.create_share()
        instance = self.boot_instance()
        self.allow_access_ip(self.share['id'], instance=instance,
                             cleanup=False)
        ssh_client = self.init_ssh(instance)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        for location in locations:
            self.mount_share(location, ssh_client)
            self.umount_share(ssh_client)
        self.servers_client.delete_server(instance['id'])
Example #28
    def test_get_share_instance(self, version):
        """Test that we get the proper keys back for the instance."""
        utils.check_skip_if_microversion_not_supported(version)

        share_instances = self.shares_v2_client.get_instances_of_share(
            self.share['id'],
            version=version,
        )['share_instances']

        si = self.shares_v2_client.get_share_instance(
            share_instances[0]['id'], version=version)['share_instance']

        expected_keys = [
            'host',
            'share_id',
            'id',
            'share_network_id',
            'status',
            'availability_zone',
            'share_server_id',
            'created_at',
        ]
        if utils.is_microversion_lt(version, '2.9'):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.10'):
            expected_keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            expected_keys.append("replica_state")
        if utils.is_microversion_ge(version, '2.22'):
            expected_keys.append("share_type_id")
        if utils.is_microversion_ge(version, '2.30'):
            expected_keys.append("cast_rules_to_readonly")
        if utils.is_microversion_ge(version, '2.54'):
            expected_keys.append("progress")
        if utils.is_microversion_ge(version, '2.71'):
            expected_keys.append("updated_at")
        expected_keys = sorted(expected_keys)
        actual_keys = sorted(si.keys())
        self.assertEqual(
            expected_keys, actual_keys,
            'Share instance %s returned incorrect keys; '
            'expected %s, got %s.' % (si['id'], expected_keys, actual_keys))
Example #29
    def test_show_share_server(self):
        share = self.shares_v2_client.get_share(self.share["id"])['share']
        server = self.shares_v2_client.show_share_server(
            share["share_server_id"])['share_server']
        keys = [
            "id",
            "host",
            "project_id",
            "status",
            "share_network_name",
            "created_at",
            "updated_at",
            "backend_details",
        ]
        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.49"):
            keys.append("is_auto_deletable")
            keys.append("identifier")
        if utils.is_microversion_ge(
            CONF.share.max_api_microversion, "2.51") and (
                utils.is_microversion_lt(
                    CONF.share.max_api_microversion, "2.70")):
            keys.append("share_network_subnet_id")
        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.70"):
            keys.append("share_network_subnet_ids")
        # all expected keys are present
        for key in keys:
            self.assertIn(key, server.keys())

        # 'created_at' is valid date
        self.assertTrue(self.date_re.match(server["created_at"]))

        # 'updated_at' is valid date if set
        if server["updated_at"]:
            self.assertTrue(self.date_re.match(server["updated_at"]))

        # verify that values for the following keys are not empty
        for k in ('host', 'id', 'status'):
            self.assertGreater(len(server[k]), 0)

        # 'backend_details' should be a dict
        self.assertIsInstance(server["backend_details"], dict)
Example #30
    def test_manage_share_server_invalid_params(self, param, invalid_value):

        sn_subnets_not_supported = utils.is_microversion_lt(
            LATEST_MICROVERSION, utils.SHARE_NETWORK_SUBNETS_MICROVERSION)
        if param == 'share_network_subnet_id' and sn_subnets_not_supported:
            raise self.skipException("Share network subnets not supported by "
                                     "microversion %s" % LATEST_MICROVERSION)

        # create share
        share = self._create_share_with_new_share_network()
        el = self.shares_v2_client.list_share_export_locations(
            share['id'])['export_locations']
        share['export_locations'] = el
        share_server = self.shares_v2_client.show_share_server(
            share['share_server_id'])['share_server']

        self._unmanage_share_and_wait(share)
        self._unmanage_share_server_and_wait(share_server)

        # forge invalid params
        invalid_params = share_server.copy()
        invalid_params[param] = invalid_value

        # try to manage in the wrong way
        self.assertRaises(lib_exc.BadRequest, self._manage_share_server,
                          share_server, invalid_params)

        # manage in the correct way
        managed_share_server = self._manage_share_server(share_server)
        managed_share = self._manage_share(
            share,
            name="managed share that had ID %s" % share['id'],
            description="description for managed share",
            share_server_id=managed_share_server['id'])

        # delete share
        self._delete_share_and_wait(managed_share)

        # delete share server
        self._delete_share_server_and_wait(managed_share_server['id'])
Example #31
    def _get_share(self, version):

        # get share
        share = self.shares_v2_client.get_share(
            self.shares[0]['id'], version=six.text_type(version))

        # verify keys
        expected_keys = [
            "status", "description", "links", "availability_zone",
            "created_at", "project_id", "volume_type", "share_proto", "name",
            "snapshot_id", "id", "size", "share_network_id", "metadata",
            "host", "snapshot_id", "is_public",
        ]
        if utils.is_microversion_lt(version, '2.9'):
            expected_keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.2'):
            expected_keys.append("snapshot_support")
        if utils.is_microversion_ge(version, '2.4'):
            expected_keys.extend(["consistency_group_id",
                                  "source_cgsnapshot_member_id"])
        if utils.is_microversion_ge(version, '2.5'):
            expected_keys.append("share_type_name")
        actual_keys = list(share.keys())
        [self.assertIn(key, actual_keys) for key in expected_keys]

        # verify values
        msg = "Expected name: '%s', actual name: '%s'" % (self.share_name,
                                                          share["name"])
        self.assertEqual(self.share_name, six.text_type(share["name"]), msg)

        msg = "Expected description: '%s', "\
              "actual description: '%s'" % (self.share_desc,
                                            share["description"])
        self.assertEqual(
            self.share_desc, six.text_type(share["description"]), msg)

        msg = "Expected size: '%s', actual size: '%s'" % (self.share_size,
                                                          share["size"])
        self.assertEqual(self.share_size, int(share["size"]), msg)
Example #32
    def test_share_type_create_get(self, version):
        utils.check_skip_if_microversion_not_supported(version)

        name = data_utils.rand_name("tempest-manila")
        description = None
        if utils.is_microversion_ge(version, "2.41"):
            description = "Description for share type"
        extra_specs = self.add_extra_specs_to_dict({
            "key": "value",
        })

        # Create share type
        st_create = self.create_share_type(name,
                                           extra_specs=extra_specs,
                                           version=version,
                                           description=description)
        self.assertEqual(name, st_create['name'])
        self._verify_description(description, st_create, version)
        self._verify_is_public_key_name(st_create, version)
        st_id = st_create["id"]

        # Get share type
        get = self.shares_v2_client.get_share_type(st_id, version=version)

        self.assertEqual(name, get["share_type"]["name"])
        self.assertEqual(st_id, get["share_type"]["id"])
        self._verify_description(description, get['share_type'], version)

        if utils.is_microversion_lt(version, "2.24"):
            # snapshot_support is an implied/required extra-spec until
            # version 2.24, and the service assumes it to be True since we
            # don't provide it during share type creation.
            extra_specs.update({"snapshot_support": 'True'})

        self.assertEqual(extra_specs, get["share_type"]["extra_specs"])
        self._verify_is_public_key_name(get['share_type'], version)

        # Check that backwards compatibility didn't break
        self.assertDictMatch(get["volume_type"], get["share_type"])
Example #33
    def _list_shares_with_detail(self, version):

        # list shares
        shares = self.shares_v2_client.list_shares_with_detail(
            version=six.text_type(version))

        # verify keys
        keys = [
            "status", "description", "links", "availability_zone",
            "created_at", "project_id", "volume_type", "share_proto", "name",
            "snapshot_id", "id", "size", "share_network_id", "metadata",
            "snapshot_id", "is_public", "share_type",
        ]
        if utils.is_microversion_lt(version, '2.9'):
            keys.extend(["export_location", "export_locations"])
        if utils.is_microversion_ge(version, '2.2'):
            keys.append("snapshot_support")
        if utils.is_microversion_ge(version, '2.6'):
            keys.append("share_type_name")
        if utils.is_microversion_ge(version, '2.10'):
            keys.append("access_rules_status")
        if utils.is_microversion_ge(version, '2.11'):
            keys.append("replication_type")
        if utils.is_microversion_ge(version, '2.16'):
            keys.append("user_id")
        if utils.is_microversion_ge(version, '2.24'):
            keys.append("create_share_from_snapshot_support")
        if utils.is_microversion_ge(version,
                                    constants.REVERT_TO_SNAPSHOT_MICROVERSION):
            keys.append("revert_to_snapshot_support")
        [self.assertIn(key, sh.keys()) for sh in shares for key in keys]

        # our shares are in the list and have no duplicates
        for share in self.shares:
            gen = [sid["id"] for sid in shares if sid["id"] in share["id"]]
            msg = "expected id lists %s times in share list" % (len(gen))
            self.assertEqual(1, len(gen), msg)
Example #34
    def test_migration_files(self):

        if self.protocol == "CIFS":
            raise self.skipException("Test for CIFS protocol not supported "
                                     "at this moment. Skipping.")

        if not CONF.share.run_migration_tests:
            raise self.skipException("Migration tests disabled. Skipping.")

        pools = self.shares_admin_client.list_pools()['pools']

        if len(pools) < 2:
            raise self.skipException("At least two different pool entries "
                                     "are needed to run migration tests. "
                                     "Skipping.")

        self.security_group = self._create_security_group()
        self.create_share()
        share = self.shares_client.get_share(self.share['id'])

        dest_pool = next((x for x in pools if x['name'] != share['host']),
                         None)

        self.assertIsNotNone(dest_pool)
        self.assertIsNotNone(dest_pool.get('name'))

        dest_pool = dest_pool['name']

        instance1 = self.boot_instance()
        self.allow_access_ip(self.share['id'], instance=instance1,
                             cleanup=False)
        ssh_client = self.init_ssh(instance1)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        self.mount_share(locations[0], ssh_client)

        ssh_client.exec_command("mkdir -p /mnt/f1")
        ssh_client.exec_command("mkdir -p /mnt/f2")
        ssh_client.exec_command("mkdir -p /mnt/f3")
        ssh_client.exec_command("mkdir -p /mnt/f4")
        ssh_client.exec_command("mkdir -p /mnt/f1/ff1")
        ssh_client.exec_command("sleep 1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("chmod -R 555 /mnt/f3")
        ssh_client.exec_command("chmod -R 777 /mnt/f4")

        self.umount_share(ssh_client)

        share = self.migrate_share(share['id'], dest_pool)
        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            new_locations = self.share['export_locations']
        else:
            new_exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            new_locations = [x['path'] for x in new_exports]

        self.assertEqual(dest_pool, share['host'])
        locations.sort()
        new_locations.sort()
        self.assertNotEqual(locations, new_locations)
        self.assertEqual('migration_success', share['task_state'])

        self.mount_share(new_locations[0], ssh_client)

        output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")

        self.umount_share(ssh_client)

        self.assertTrue('1m1.bin' in output)
        self.assertTrue('1m2.bin' in output)
        self.assertTrue('1m3.bin' in output)
        self.assertTrue('1m4.bin' in output)
        self.assertTrue('1m5.bin' in output)
Example #35
    def test_migration_files(self):

        if self.protocol != "NFS":
            raise self.skipException("Only NFS protocol supported "
                                     "at this moment.")

        pools = self.shares_admin_v2_client.list_pools(detail=True)['pools']

        if len(pools) < 2:
            raise self.skipException("At least two different pool entries are "
                                     "needed to run share migration tests.")

        instance = self.boot_instance(wait_until="BUILD")
        self.create_share()
        instance = self.wait_for_active_instance(instance["id"])
        self.share = self.shares_client.get_share(self.share['id'])

        default_type = self.shares_v2_client.list_share_types(
            default=True)['share_type']

        dest_pool = utils.choose_matching_backend(
            self.share, pools, default_type)

        self.assertIsNotNone(dest_pool)
        self.assertIsNotNone(dest_pool.get('name'))

        dest_pool = dest_pool['name']

        self.allow_access_ip(
            self.share['id'], instance=instance, cleanup=False)
        ssh_client = self.init_ssh(instance)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            exports = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            self.assertNotEmpty(exports)
            exports = [x['path'] for x in exports]
            self.assertNotEmpty(exports)

        self.mount_share(exports[0], ssh_client)

        ssh_client.exec_command("mkdir -p /mnt/f1")
        ssh_client.exec_command("mkdir -p /mnt/f2")
        ssh_client.exec_command("mkdir -p /mnt/f3")
        ssh_client.exec_command("mkdir -p /mnt/f4")
        ssh_client.exec_command("mkdir -p /mnt/f1/ff1")
        ssh_client.exec_command("sleep 1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("chmod -R 555 /mnt/f3")
        ssh_client.exec_command("chmod -R 777 /mnt/f4")

        self.umount_share(ssh_client)

        task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
                      constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)

        self.share = self.migrate_share(
            self.share['id'], dest_pool, task_state)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            new_exports = self.share['export_locations']
            self.assertNotEmpty(new_exports)
        else:
            new_exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            self.assertNotEmpty(new_exports)
            new_exports = [x['path'] for x in new_exports]
            self.assertNotEmpty(new_exports)

        self.assertEqual(dest_pool, self.share['host'])
        self.assertEqual(constants.TASK_STATE_MIGRATION_SUCCESS,
                         self.share['task_state'])

        self.mount_share(new_exports[0], ssh_client)

        output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")

        self.umount_share(ssh_client)

        self.assertTrue('1m1.bin' in output)
        self.assertTrue('1m2.bin' in output)
        self.assertTrue('1m3.bin' in output)
        self.assertTrue('1m4.bin' in output)
        self.assertTrue('1m5.bin' in output)
Example #36
    def test_migration_files(self):

        if self.protocol != "NFS":
            raise self.skipException("Only NFS protocol supported "
                                     "at this moment.")

        pools = self.shares_admin_v2_client.list_pools(detail=True)['pools']

        if len(pools) < 2:
            raise self.skipException("At least two different pool entries are "
                                     "needed to run share migration tests.")

        instance = self.boot_instance(wait_until="BUILD")
        self.create_share()
        instance = self.wait_for_active_instance(instance["id"])
        self.share = self.shares_client.get_share(self.share['id'])

        default_type = self.shares_v2_client.list_share_types(
            default=True)['share_type']

        dest_pool = utils.choose_matching_backend(self.share, pools,
                                                  default_type)

        self.assertIsNotNone(dest_pool)
        self.assertIsNotNone(dest_pool.get('name'))

        dest_pool = dest_pool['name']

        self.allow_access_ip(self.share['id'],
                             instance=instance,
                             cleanup=False)
        ssh_client = self.init_ssh(instance)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            exports = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            self.assertNotEmpty(exports)
            exports = [x['path'] for x in exports]
            self.assertNotEmpty(exports)

        self.mount_share(exports[0], ssh_client)

        ssh_client.exec_command("mkdir -p /mnt/f1")
        ssh_client.exec_command("mkdir -p /mnt/f2")
        ssh_client.exec_command("mkdir -p /mnt/f3")
        ssh_client.exec_command("mkdir -p /mnt/f4")
        ssh_client.exec_command("mkdir -p /mnt/f1/ff1")
        ssh_client.exec_command("sleep 1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("chmod -R 555 /mnt/f3")
        ssh_client.exec_command("chmod -R 777 /mnt/f4")

        self.umount_share(ssh_client)

        task_state = (constants.TASK_STATE_DATA_COPYING_COMPLETED,
                      constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE)

        self.share = self.migrate_share(self.share['id'], dest_pool,
                                        task_state)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            new_exports = self.share['export_locations']
            self.assertNotEmpty(new_exports)
        else:
            new_exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            self.assertNotEmpty(new_exports)
            new_exports = [x['path'] for x in new_exports]
            self.assertNotEmpty(new_exports)

        self.assertEqual(dest_pool, self.share['host'])
        self.assertEqual(constants.TASK_STATE_MIGRATION_SUCCESS,
                         self.share['task_state'])

        self.mount_share(new_exports[0], ssh_client)

        output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")

        self.umount_share(ssh_client)

        self.assertIn('1m1.bin', output)
        self.assertIn('1m2.bin', output)
        self.assertIn('1m3.bin', output)
        self.assertIn('1m4.bin', output)
        self.assertIn('1m5.bin', output)
Example #37
0
    def skip_if_microversion_lt(self, microversion):
        if utils.is_microversion_lt(CONF.share.max_api_microversion,
                                    microversion):
            raise self.skipException(
                "Microversion must be greater than or equal to '%s'." %
                microversion)
    def test_migration_files(self):

        if self.protocol == "CIFS":
            raise self.skipException("Test for CIFS protocol not supported "
                                     "at this moment. Skipping.")

        if not CONF.share.run_migration_tests:
            raise self.skipException("Migration tests disabled. Skipping.")

        pools = self.shares_admin_client.list_pools()['pools']

        if len(pools) < 2:
            raise self.skipException("At least two different pool entries "
                                     "are needed to run migration tests. "
                                     "Skipping.")

        self.security_group = self._create_security_group()
        self.create_share()
        share = self.shares_client.get_share(self.share['id'])

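        # Choose any pool other than the one currently hosting the share as
        # the migration destination.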
        dest_pool = next((x for x in pools if x['name'] != share['host']),
                         None)

        self.assertIsNotNone(dest_pool)
        self.assertIsNotNone(dest_pool.get('name'))

        dest_pool = dest_pool['name']

        instance1 = self.boot_instance()
        self.allow_access_ip(self.share['id'],
                             instance=instance1,
                             cleanup=False)
        ssh_client = self.init_ssh(instance1)

        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = self.share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            locations = [x['path'] for x in exports]

        self.mount_share(locations[0], ssh_client)

        ssh_client.exec_command("mkdir -p /mnt/f1")
        ssh_client.exec_command("mkdir -p /mnt/f2")
        ssh_client.exec_command("mkdir -p /mnt/f3")
        ssh_client.exec_command("mkdir -p /mnt/f4")
        ssh_client.exec_command("mkdir -p /mnt/f1/ff1")
        ssh_client.exec_command("sleep 1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/1m1.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f2/1m2.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f3/1m3.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f4/1m4.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("dd if=/dev/zero of=/mnt/f1/ff1/1m5.bin bs=1M"
                                " count=1")
        ssh_client.exec_command("chmod -R 555 /mnt/f3")
        ssh_client.exec_command("chmod -R 777 /mnt/f4")

        self.umount_share(ssh_client)

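        # Migrate the share and wait for completion; the export locations are
        # expected to change along with the host.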
        share = self.migrate_share(share['id'], dest_pool)
        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            new_locations = self.share['export_locations']
        else:
            new_exports = self.shares_v2_client.list_share_export_locations(
                self.share['id'])
            new_locations = [x['path'] for x in new_exports]

        self.assertEqual(dest_pool, share['host'])
        locations.sort()
        new_locations.sort()
        self.assertNotEqual(locations, new_locations)
        self.assertEqual('migration_success', share['task_state'])

        self.mount_share(new_locations[0], ssh_client)

        output = ssh_client.exec_command("ls -lRA --ignore=lost+found /mnt")

        self.umount_share(ssh_client)

        self.assertIn('1m1.bin', output)
        self.assertIn('1m2.bin', output)
        self.assertIn('1m3.bin', output)
        self.assertIn('1m4.bin', output)
        self.assertIn('1m5.bin', output)
Example #39
0
    def test_list_access_rules(self, version):
        utils.check_skip_if_microversion_not_supported(version)
        if (utils.is_microversion_lt(version, '2.13')
                and CONF.share.enable_cephx_rules_for_protocols):
            msg = ("API version %s does not support cephx access type, need "
                   "version >= 2.13." % version)
            raise self.skipException(msg)

        metadata = None
        if utils.is_microversion_ge(version, '2.45'):
            metadata = {'key1': 'v1', 'key2': 'v2'}
        if utils.is_microversion_le(version, '2.9'):
            client = self.shares_client
        else:
            client = self.shares_v2_client
        # create rule
        rule = self.allow_access(self.share["id"],
                                 client=client,
                                 access_type=self.access_type,
                                 access_to=self.access_to,
                                 metadata=metadata,
                                 version=version)

        # since 2.33 the create-rule response includes created_at/updated_at
        if utils.is_microversion_ge(version, '2.33'):
            self.assertIn('created_at', list(rule.keys()))
            self.assertIn('updated_at', list(rule.keys()))
        else:
            self.assertNotIn('created_at', list(rule.keys()))
            self.assertNotIn('updated_at', list(rule.keys()))

        # rules start out in 'new' up to 2.27 and in 'queued_to_apply' from 2.28
        if utils.is_microversion_le(version, "2.27"):
            self.assertEqual("new", rule['state'])
        else:
            self.assertEqual("queued_to_apply", rule['state'])

        # list rules
        if utils.is_microversion_eq(version, '1.0'):
            rules = self.shares_client.list_access_rules(
                self.share["id"])['access_list']
        else:
            rules = self.shares_v2_client.list_access_rules(
                self.share["id"], version=version)['access_list']

        # verify keys
        keys = ("id", "access_type", "access_to", "access_level")
        if utils.is_microversion_ge(version, '2.21'):
            keys += ("access_key", )
        if utils.is_microversion_ge(version, '2.33'):
            keys += (
                "created_at",
                "updated_at",
            )
        if utils.is_microversion_ge(version, '2.45'):
            keys += ("metadata", )
        for key in keys:
            [self.assertIn(key, r.keys()) for r in rules]
        for key in ('deleted', 'deleted_at', 'instance_mappings'):
            [self.assertNotIn(key, r.keys()) for r in rules]

        # verify values
        self.assertEqual(self.access_type, rules[0]["access_type"])
        self.assertEqual(self.access_to, rules[0]["access_to"])
        self.assertEqual('rw', rules[0]["access_level"])
        if utils.is_microversion_ge(version, '2.21'):
            if self.access_type == 'cephx':
                self.assertIsNotNone(rules[0]['access_key'])
            else:
                self.assertIsNone(rules[0]['access_key'])

        # our rule's id must appear in the list exactly once
        gen = [r["id"] for r in rules if r["id"] == rule["id"]]
        msg = ("expected rule id to appear once in the rule list, "
               "found it %s times" % len(gen))
        self.assertEqual(1, len(gen), msg)
Example #40
0
    def test_create_manage_and_write(self):
        share_size = CONF.share.share_size

        LOG.debug('Step 1 - create instance')
        instance = self.boot_instance(wait_until="BUILD")

        LOG.debug('Step 2 - create share of size {} GB'.format(share_size))
        share = self.create_share(size=share_size, cleanup=False)
        instance = self.wait_for_active_instance(instance["id"])

        LOG.debug('Step 3 - SSH to UVM')
        remote_client = self.init_remote_client(instance)

        LOG.debug('Step 4 - provide access to instance')
        self._provide_access_to_client_identified_by_ip(instance, share=share)

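        # Before microversion 2.9 export locations are embedded in the share
        # object; from 2.9 on they are fetched from a dedicated endpoint.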
        if utils.is_microversion_lt(CONF.share.max_api_microversion, "2.9"):
            locations = share['export_locations']
        else:
            exports = self.shares_v2_client.list_share_export_locations(
                share['id'])['export_locations']
            locations = [x['path'] for x in exports]

        LOG.debug('Step 5 - mount')
        self.mount_share(locations[0], remote_client)

        # Update share info, needed later
        share = self.shares_admin_v2_client.get_share(share['id'])['share']

        LOG.debug('Step 6a - create file')
        remote_client.exec_command("sudo touch /mnt/t1")

        LOG.debug('Step 6b - write data')
        LOG.debug('Step 6b - writing 640mb')
        self.write_data_to_mounted_share_using_dd(remote_client,
                                                  '/mnt/t1', 1024,
                                                  2048, '/dev/zero')
        ls_result = remote_client.exec_command("sudo ls -lA /mnt/")
        LOG.debug(ls_result)

        LOG.debug('Step 7 - unmount share')
        self.unmount_share(remote_client)

        LOG.debug('Step 8a - unmanage share')
        self.shares_admin_v2_client.unmanage_share(share['id'])

        LOG.debug('Step 8b - wait for status change')
        self.shares_admin_v2_client.wait_for_resource_deletion(
            share_id=share['id'])

        LOG.debug('Step 9 - get share, should fail')
        self.assertRaises(
            exceptions.NotFound,
            self.shares_admin_v2_client.get_share,
            share['id'])

        LOG.debug('Step 10 - manage share')
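        # Re-register the unmanaged share with Manila using its original
        # host, protocol and export path.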
        share_type = self.get_share_type()
        managed_share = self.shares_admin_v2_client.manage_share(
            share['host'],
            share['share_proto'],
            locations[0],
            share_type['id'])['share']
        waiters.wait_for_resource_status(
            self.shares_admin_v2_client, managed_share['id'], 'available')

        LOG.debug('Step 11 - grant access again')
        self._provide_access_to_client_identified_by_ip(
            instance,
            share=managed_share,
            client=self.shares_admin_v2_client)

        exports = self.shares_admin_v2_client.list_share_export_locations(
            managed_share['id'])['export_locations']
        locations = [x['path'] for x in exports]

        LOG.debug('Step 12a - mount')
        self.mount_share(locations[0], remote_client)

        LOG.debug('Step 12b - verify data')
        ls_result = remote_client.exec_command("sudo ls -lA /mnt/")
        LOG.debug(ls_result)

        LOG.debug('Step 13 - unmount share')
        self.unmount_share(remote_client)

        LOG.debug('Step 14 - delete share')
        self.shares_admin_v2_client.delete_share(managed_share['id'])
        self.shares_admin_v2_client.wait_for_resource_deletion(
            share_id=managed_share['id'])

        LOG.debug('Step 15 - manage share, should fail')
        remanaged_share = self.shares_admin_v2_client.manage_share(
            share['host'],
            share['share_proto'],
            locations[0],
            share_type['id'])['share']
        waiters.wait_for_resource_status(
            self.shares_admin_v2_client, remanaged_share['id'], 'manage_error')

        self.shares_admin_v2_client.reset_state(remanaged_share['id'])

        LOG.debug('Step 16 - delete failed managed share')
        self.shares_admin_v2_client.delete_share(remanaged_share['id'])
        self.shares_admin_v2_client.wait_for_resource_deletion(
            share_id=remanaged_share['id'])
Example #41
0
    def skip_if_microversion_lt(self, microversion):
        if utils.is_microversion_lt(CONF.share.max_api_microversion,
                                    microversion):
            raise self.skipException(
                "Microversion must be greater than or equal to '%s'." %
                microversion)
Example #42
0
    def test_list_access_rules(self, version):
        if (utils.is_microversion_lt(version, '2.13')
                and CONF.share.enable_cephx_rules_for_protocols):
            msg = ("API version %s does not support cephx access type, need "
                   "version >= 2.13." % version)
            raise self.skipException(msg)

        # create rule
        if utils.is_microversion_eq(version, '1.0'):
            rule = self.shares_client.create_access_rule(
                self.share["id"], self.access_type, self.access_to)
        else:
            rule = self.shares_v2_client.create_access_rule(self.share["id"],
                                                            self.access_type,
                                                            self.access_to,
                                                            version=version)

        # since 2.33 the create-rule response includes created_at/updated_at
        if utils.is_microversion_ge(version, '2.33'):
            self.assertIn('created_at', list(rule.keys()))
            self.assertIn('updated_at', list(rule.keys()))
        else:
            self.assertNotIn('created_at', list(rule.keys()))
            self.assertNotIn('updated_at', list(rule.keys()))

        # rules start out in 'new' up to 2.27 and in 'queued_to_apply' from 2.28
        if utils.is_microversion_le(version, "2.27"):
            self.assertEqual("new", rule['state'])
        else:
            self.assertEqual("queued_to_apply", rule['state'])

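        # For 1.0 and 2.9 wait on the individual rule to become 'active';
        # newer versions poll the share-level access_rules_status instead.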
        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.wait_for_access_rule_status(
                self.share["id"], rule["id"], "active")
        elif utils.is_microversion_eq(version, '2.9'):
            self.shares_v2_client.wait_for_access_rule_status(
                self.share["id"], rule["id"], "active")
        else:
            self.shares_v2_client.wait_for_share_status(
                self.share["id"],
                "active",
                status_attr='access_rules_status',
                version=version)

        # list rules
        if utils.is_microversion_eq(version, '1.0'):
            rules = self.shares_client.list_access_rules(self.share["id"])
        else:
            rules = self.shares_v2_client.list_access_rules(self.share["id"],
                                                            version=version)

        # verify keys
        keys = ("id", "access_type", "access_to", "access_level")
        if utils.is_microversion_ge(version, '2.21'):
            keys += ("access_key", )
        if utils.is_microversion_ge(version, '2.33'):
            keys += (
                "created_at",
                "updated_at",
            )
        for key in keys:
            [self.assertIn(key, r.keys()) for r in rules]
        for key in ('deleted', 'deleted_at', 'instance_mappings'):
            [self.assertNotIn(key, r.keys()) for r in rules]

        # verify values
        self.assertEqual(self.access_type, rules[0]["access_type"])
        self.assertEqual(self.access_to, rules[0]["access_to"])
        self.assertEqual('rw', rules[0]["access_level"])
        if utils.is_microversion_ge(version, '2.21'):
            if self.access_type == 'cephx':
                self.assertIsNotNone(rules[0]['access_key'])
            else:
                self.assertIsNone(rules[0]['access_key'])

        # our rule's id must appear in the list exactly once
        gen = [r["id"] for r in rules if r["id"] == rule["id"]]
        msg = ("expected rule id to appear once in the rule list, "
               "found it %s times" % len(gen))
        self.assertEqual(1, len(gen), msg)

        if utils.is_microversion_eq(version, '1.0'):
            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
            self.shares_client.wait_for_resource_deletion(
                rule_id=rule["id"], share_id=self.share['id'])
        else:
            self.shares_v2_client.delete_access_rule(self.share["id"],
                                                     rule["id"],
                                                     version=version)
            self.shares_v2_client.wait_for_resource_deletion(
                rule_id=rule["id"], share_id=self.share['id'], version=version)