コード例 #1
0
ファイル: smoke.py プロジェクト: bhushan5/cloudroast
    def volume_types_with_restore_control(
            cls, max_datasets=None, randomize=False, model_filter=None,
            filter_mode=BlockstorageDatasets.INCLUSION_MODE):
        """Return a DatasetList covering every volume type, annotated with
        whether restoring a snapshot to a *different* volume type is allowed.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        # Fetch all volume types, then narrow by the caller's filter.
        vol_types = cls._filter_model_list(
            cls._get_volume_types(), model_filter=model_filter,
            filter_mode=filter_mode)

        # The config flag applies uniformly to every generated dataset.
        restore_enabled = (
            cls._volumes.config.allow_snapshot_restore_to_different_type)
        suffix = "allowed" if restore_enabled else "disabled"

        datasets = DatasetList()
        for vtype in vol_types:
            datasets.append_new_dataset(
                "{0}_to_other_is_{1}".format(vtype.name, suffix),
                {'volume_type_name': vtype.name,
                 'volume_type_id': vtype.id_,
                 'restore_to_different_type_enabled': restore_enabled})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(
            datasets, max_datasets=max_datasets, randomize=randomize)
コード例 #2
0
ファイル: datasets.py プロジェクト: zerolugithub/cloudcafe
    def volume_types(cls,
                     max_datasets=None,
                     randomize=None,
                     model_filter=None,
                     filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
                     tags=None):
        """Return a DatasetList holding one dataset per volume type.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        # Pull every volume type, then narrow by the caller's filter.
        vtypes = cls._filter_model_list(
            cls._get_volume_types(),
            model_filter=model_filter,
            filter_mode=filter_mode)

        dataset_list = DatasetList()
        for vtype in vtypes:
            dataset_list.append_new_dataset(
                vtype.name,
                {'volume_type_name': vtype.name,
                 'volume_type_id': vtype.id_})

        # Trim / shuffle per the caller's request.
        dataset_list = cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)

        # Tag the surviving datasets so the runner can select by tag.
        if tags:
            dataset_list.apply_test_tags(*tags)

        return dataset_list
コード例 #3
0
    def volume_types_with_restore_control(
            cls,
            max_datasets=None,
            randomize=False,
            model_filter=None,
            filter_mode=BlockstorageDatasets.INCLUSION_MODE):
        """Return a DatasetList of volume types, each carrying a flag that
        says whether snapshot restore to a different volume type is enabled.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        filtered = cls._filter_model_list(
            cls._get_volume_types(),
            model_filter=model_filter,
            filter_mode=filter_mode)

        # Single config lookup shared by every generated dataset.
        enabled = cls._volumes.config.allow_snapshot_restore_to_different_type

        results = DatasetList()
        for vol_type in filtered:
            name = "{0}_to_other_is_{1}".format(
                vol_type.name, "allowed" if enabled else "disabled")
            results.append_new_dataset(
                name,
                {'volume_type_name': vol_type.name,
                 'volume_type_id': vol_type.id_,
                 'restore_to_different_type_enabled': enabled})

        # Apply max_datasets / randomize before handing the list back.
        return cls._modify_dataset_list(
            results, max_datasets=max_datasets, randomize=randomize)
コード例 #4
0
    def images_by_volume_type(cls,
                              max_datasets=None,
                              randomize=False,
                              image_filter=None,
                              volume_type_filter=None):
        """Return a DatasetList of permutations of Volume Types and Images.

        Requests all available images and volume types from the API, and
        applies image_filter and volume_type_filter if provided.  Filters
        should be dictionaries with model attributes as keys and lists of
        attributes as key values.
        """
        images = cls._filter_model_list(cls._images(), image_filter)
        vol_types = cls._filter_model_list(cls._volume_types(),
                                           volume_type_filter)

        # Cartesian product: one dataset per (volume type, image) pair.
        dataset_list = DatasetList()
        for vtype in vol_types:
            for image in images:
                name = "{0}_volume_from_{1}_image".format(
                    vtype.name, str(image.name).replace(" ", "_"))
                dataset_list.append_new_dataset(
                    name, {'volume_type': vtype, 'image': image})

        if randomize:
            shuffle(dataset_list)

        # NOTE(review): slicing may return a plain list rather than a
        # DatasetList, depending on how DatasetList subclasses list --
        # confirm against DatasetList's definition.
        if max_datasets:
            dataset_list = dataset_list[:max_datasets]

        return dataset_list
コード例 #5
0
ファイル: datasets.py プロジェクト: openstack/cloudcafe
    def images_by_volume_type(
            cls, max_datasets=None, randomize=False,
            image_filter=None, volume_type_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList of every (image, volume type) combination.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        images = cls._filter_model_list(
            cls._get_images(), model_filter=image_filter,
            filter_mode=image_filter_mode)
        vol_types = cls._filter_model_list(
            cls._get_volume_types(), model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # One dataset per (volume type, image) pairing.
        dataset_list = DatasetList()
        for vol_type in vol_types:
            # Name fragment is invariant across the inner loop; hoist it.
            type_label = str(vol_type.name).replace(" ", "_")
            for image in images:
                testname = "{0}_and_{1}".format(
                    type_label, str(image.name).replace(" ", "_"))
                dataset_list.append_new_dataset(
                    testname, {'volume_type': vol_type, 'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
コード例 #6
0
ファイル: datasets.py プロジェクト: kshortwindham/cloudcafe
    def images_by_flavor(
            cls, max_datasets=None, randomize=False,
            image_filter=None, flavor_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList of every (image, flavor) combination.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """

        def _safe(name):
            # Spaces and slashes are not legal in generated test names.
            return str(name).replace(" ", "_").replace("/", "-")

        images = cls._filter_model_list(
            cls._get_images(), model_filter=image_filter,
            filter_mode=image_filter_mode)
        flavors = cls._filter_model_list(
            cls._get_flavors(), model_filter=flavor_filter,
            filter_mode=flavor_filter_mode)

        dataset_list = DatasetList()
        for image in images:
            for flavor in flavors:
                dataset_list.append_new_dataset(
                    "image_{0}_and_flavor_{1}".format(
                        _safe(image.name), _safe(flavor.name)),
                    {'flavor': flavor, 'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
コード例 #7
0
ファイル: datasets.py プロジェクト: openstack/cloudcafe
    def volume_types(
            cls, max_datasets=None, randomize=None, model_filter=None,
            filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE, tags=None):
        """Return a DatasetList with one dataset per volume type.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        vtype_list = cls._filter_model_list(
            cls._get_volume_types(),
            model_filter=model_filter,
            filter_mode=filter_mode)

        dataset_list = DatasetList()
        for vtype in vtype_list:
            dataset_list.append_new_dataset(
                vtype.name,
                {'volume_type_name': vtype.name,
                 'volume_type_id': vtype.id_})

        # Shuffle/trim first, then tag whatever survives.
        dataset_list = cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
        if tags:
            dataset_list.apply_test_tags(*tags)
        return dataset_list
コード例 #8
0
ファイル: datasets.py プロジェクト: cloudkeep/cloudroast
    def images_by_volume_type(
            cls, max_datasets=None, randomize=False, image_filter=None,
            volume_type_filter=None):
        """Return a DatasetList of permutations of Volume Types and Images.

        Requests all available images and volume types from the API, and
        applies image_filter and volume_type_filter when provided.  Filters
        should be dictionaries with model attributes as keys and lists of
        attributes as key values.
        """
        filtered_images = cls._filter_model_list(cls._images(), image_filter)
        filtered_types = cls._filter_model_list(
            cls._volume_types(), volume_type_filter)

        # Pair every volume type with every image.
        dataset_list = DatasetList()
        for volume_type in filtered_types:
            for image in filtered_images:
                dataset_list.append_new_dataset(
                    "{0}_volume_from_{1}_image".format(
                        volume_type.name,
                        str(image.name).replace(" ", "_")),
                    {'volume_type': volume_type, 'image': image})

        if randomize:
            shuffle(dataset_list)

        # NOTE(review): slicing may demote a DatasetList to a plain list --
        # confirm against DatasetList's definition.
        if max_datasets:
            dataset_list = dataset_list[:max_datasets]

        return dataset_list
コード例 #9
0
ファイル: fixtures.py プロジェクト: cloudkeep/cloudroast
    def volume_types(cls):
        """Return a DatasetList of volume type names and ids, obtained via
        the cinder CLI."""
        cinder_cli = CinderCLI_Composite()
        dataset_list = DatasetList()
        for vtype in cinder_cli.behaviors.list_volume_types():
            dataset_list.append_new_dataset(
                vtype.name,
                {'volume_type_name': vtype.name,
                 'volume_type_id': vtype.id_})
        return dataset_list
コード例 #10
0
ファイル: datasets.py プロジェクト: zerolugithub/cloudcafe
    def flavors_by_images_by_volume_type(
        cls,
        max_datasets=None,
        randomize=None,
        flavor_filter=None,
        volume_type_filter=None,
        image_filter=None,
        flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
    ):
        """Return a DatasetList of every (flavor, image, volume type)
        combination.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        images = cls._filter_model_list(
            cls._get_images(),
            model_filter=image_filter,
            filter_mode=image_filter_mode)

        flavors = cls._filter_model_list(
            cls._get_flavors(),
            model_filter=flavor_filter,
            filter_mode=flavor_filter_mode)

        vol_types = cls._filter_model_list(
            cls._get_volume_types(),
            model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # One dataset per (volume type, flavor, image) triple.
        dataset_list = DatasetList()
        for vtype in vol_types:
            for flavor in flavors:
                for image in images:
                    raw = "{flavor}_{image}_on_{vtype}".format(
                        flavor=str(flavor.name), image=str(image.name),
                        vtype=str(vtype.name))
                    # Sanitize characters that are illegal in test names;
                    # the replacements cover the whole formatted string.
                    testname = (raw.replace(' ', '_').replace('.', '_')
                                .replace('(', '').replace(')', ''))
                    dataset_list.append_new_dataset(
                        testname,
                        {'volume_type': vtype,
                         'flavor': flavor,
                         'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
コード例 #11
0
ファイル: datasets.py プロジェクト: varapreddy/cloudroast
    def volume_types(
            cls, max_datasets=None, randomize=False, volume_type_filter=None):
        """Return a DatasetList of volume type names and ids.

        NOTE(review): max_datasets and randomize are accepted but never
        applied here -- confirm whether that is intentional.
        """
        dataset_list = DatasetList()
        for vtype in cls._filter_model_list(cls._volume_types(),
                                            volume_type_filter):
            dataset_list.append_new_dataset(
                vtype.name,
                {'volume_type_name': vtype.name,
                 'volume_type_id': vtype.id_})
        return dataset_list
コード例 #12
0
ファイル: fixtures.py プロジェクト: varapreddy/cloudroast
    def volume_types(cls):
        """Return a DatasetList of volume type names and ids queried
        through the cinder CLI."""
        behaviors = CinderCLI_Composite().behaviors
        dataset_list = DatasetList()
        for volume_type in behaviors.list_volume_types():
            data = {'volume_type_name': volume_type.name,
                    'volume_type_id': volume_type.id_}
            dataset_list.append_new_dataset(volume_type.name, data)
        return dataset_list
コード例 #13
0
ファイル: datasets.py プロジェクト: openstack/cloudcafe
    def flavors_by_images_by_volume_type(
            cls, max_datasets=None, randomize=None,
            flavor_filter=None, volume_type_filter=None, image_filter=None,
            flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,):
        """Return a DatasetList of every (flavor, image, volume type)
        combination.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """

        def _clean(label):
            # Strip characters that are illegal in generated test names.
            return (label.replace(' ', '_').replace('.', '_')
                    .replace('(', '').replace(')', '').replace('/', '-'))

        images = cls._filter_model_list(
            cls._get_images(), model_filter=image_filter,
            filter_mode=image_filter_mode)
        flavors = cls._filter_model_list(
            cls._get_flavors(), model_filter=flavor_filter,
            filter_mode=flavor_filter_mode)
        vol_types = cls._filter_model_list(
            cls._get_volume_types(), model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # One dataset per (volume type, flavor, image) triple.
        dataset_list = DatasetList()
        for vtype in vol_types:
            for flavor in flavors:
                for image in images:
                    testname = _clean(
                        "{flavor}_{image}_on_{vtype}".format(
                            flavor=str(flavor.name),
                            image=str(image.name),
                            vtype=str(vtype.name)))
                    dataset_list.append_new_dataset(
                        testname,
                        {'volume_type': vtype,
                         'flavor': flavor,
                         'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
コード例 #14
0
    def valid_quota_names(cls):
        """Build a DatasetList of every expected quota resource name:
        the base resources plus a per-volume-type variant of each."""
        quota_test_dataset = DatasetList()
        vol_type_names = cls._get_volume_type_names()

        for resource in ["snapshots", "volumes", "gigabytes"]:
            quota_test_dataset.append_new_dataset(
                resource, {"quota_name": resource})

            # Per-volume-type quotas look like "<resource>_<type name>".
            for vol_name in vol_type_names:
                key = "{resource}_{vol_name}".format(
                    resource=resource, vol_name=vol_name)
                quota_test_dataset.append_new_dataset(
                    key, {"quota_name": key})

        return quota_test_dataset
コード例 #15
0
    def valid_quota_names(cls):
        """Create a DatasetList of expected quota resource names."""
        datasets = DatasetList()
        resources = ["snapshots", "volumes", "gigabytes"]
        type_names = cls._get_volume_type_names()

        for res in resources:
            # Base resource quota, e.g. "volumes".
            datasets.append_new_dataset(res, {"quota_name": res})
            # Volume-type-scoped quota, e.g. "volumes_<type>".
            for type_name in type_names:
                combined = "{resource}_{vol_name}".format(
                    resource=res, vol_name=type_name)
                datasets.append_new_dataset(
                    combined, {"quota_name": combined})

        return datasets
コード例 #16
0
def build_basic_dataset(data_dict, name):
    """
    @summary: Builds a dataset list from a dictionary of key-value pairs

    @param data_dict: Url amendments and values for the dataset list
    @type data_dict: Dictionary
    @param name: Name of the test parameter
    @type name: String

    @return: Dataset_List
    @rtype: DatasetList
    """

    dataset_list = DatasetList()

    # dict.items() works on both Python 2 and 3; dict.iteritems() was
    # removed in Python 3 and raises AttributeError there.
    for key, value in data_dict.items():
        dataset_list.append_new_dataset(key, {name: value})

    return dataset_list
コード例 #17
0
ファイル: generators.py プロジェクト: bhushan5/cloudroast
def build_basic_dataset(data_dict, name):
    """
    @summary: Builds a dataset list from a dictionary of key-value pairs

    @param data_dict: Url amendments and values for the dataset list
    @type data_dict: Dictionary
    @param name: Name of the test parameter
    @type name: String

    @return: Dataset_List
    @rtype: DatasetList
    """

    dataset_list = DatasetList()

    # dict.items() works on both Python 2 and 3; dict.iteritems() was
    # removed in Python 3 and raises AttributeError there.
    for key, value in data_dict.items():
        dataset_list.append_new_dataset(key, {name: value})

    return dataset_list
コード例 #18
0
ファイル: datasets.py プロジェクト: kshortwindham/cloudcafe
    def flavors(
            cls, max_datasets=None, randomize=False, model_filter=None,
            filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList with one dataset per flavor.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        flavors = cls._filter_model_list(
            cls._get_flavors(), model_filter=model_filter,
            filter_mode=filter_mode)

        dataset_list = DatasetList()
        for flavor in flavors:
            # Flavor names may contain spaces or slashes; neither is
            # legal in a generated test name.
            label = str(flavor.name).replace(" ", "_").replace("/", "-")
            dataset_list.append_new_dataset(label, {'flavor': flavor})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
コード例 #19
0
ファイル: datasets.py プロジェクト: kshortwindham/cloudcafe
    def images(
            cls, max_datasets=None, randomize=False, model_filter=None,
            filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList with one dataset per image.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        images = cls._filter_model_list(
            cls._get_images(), model_filter=model_filter,
            filter_mode=filter_mode)

        dataset_list = DatasetList()
        for image in images:
            # Image names may contain spaces or slashes; neither is
            # legal in a generated test name.
            label = str(image.name).replace(" ", "_").replace("/", "-")
            dataset_list.append_new_dataset(label, {'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
コード例 #20
0
ファイル: datasets.py プロジェクト: zerolugithub/cloudcafe
    def images_by_volume_type(
            cls,
            max_datasets=None,
            randomize=False,
            image_filter=None,
            volume_type_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList pairing every image with every volume type.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        filtered_images = cls._filter_model_list(
            cls._get_images(),
            model_filter=image_filter,
            filter_mode=image_filter_mode)

        filtered_types = cls._filter_model_list(
            cls._get_volume_types(),
            model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # Cartesian product: one dataset per (volume type, image) pair.
        dataset_list = DatasetList()
        for volume_type in filtered_types:
            for image in filtered_images:
                dataset_list.append_new_dataset(
                    "{0}_and_{1}".format(
                        str(volume_type.name).replace(" ", "_"),
                        str(image.name).replace(" ", "_")),
                    {'volume_type': volume_type, 'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
コード例 #21
0
    def images_by_flavor(
            cls,
            max_datasets=None,
            randomize=False,
            image_filter=None,
            flavor_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList pairing every flavor with every image.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        filtered_images = cls._filter_model_list(
            cls._get_images(),
            model_filter=image_filter,
            filter_mode=image_filter_mode)

        filtered_flavors = cls._filter_model_list(
            cls._get_flavors(),
            model_filter=flavor_filter,
            filter_mode=flavor_filter_mode)

        dataset_list = DatasetList()
        for image in filtered_images:
            # Sanitized name fragment is invariant across the inner loop.
            image_label = str(image.name).replace(" ", "_").replace("/", "-")
            for flavor in filtered_flavors:
                flavor_label = (
                    str(flavor.name).replace(" ", "_").replace("/", "-"))
                dataset_list.append_new_dataset(
                    "image_{0}_and_flavor_{1}".format(
                        image_label, flavor_label),
                    {'flavor': flavor, 'image': image})

        # Honor max_datasets / randomize before returning.
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
コード例 #22
0
    def flavors(cls,
                max_datasets=None,
                randomize=False,
                model_filter=None,
                filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList containing one dataset per flavor.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        filtered = cls._filter_model_list(cls._get_flavors(),
                                          model_filter=model_filter,
                                          filter_mode=filter_mode)

        dataset_list = DatasetList()
        for flv in filtered:
            # Spaces and slashes are not legal in generated test names.
            name = str(flv.name).replace(" ", "_").replace("/", "-")
            dataset_list.append_new_dataset(name, {'flavor': flv})

        # max_datasets / randomize handling is delegated to the toolkit.
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
コード例 #23
0
    def images(cls,
               max_datasets=None,
               randomize=False,
               model_filter=None,
               filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Return a DatasetList containing one dataset per image.

        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """
        filtered = cls._filter_model_list(cls._get_images(),
                                          model_filter=model_filter,
                                          filter_mode=filter_mode)

        dataset_list = DatasetList()
        for img in filtered:
            # Spaces and slashes are not legal in generated test names.
            name = str(img.name).replace(" ", "_").replace("/", "-")
            dataset_list.append_new_dataset(name, {'image': img})

        # max_datasets / randomize handling is delegated to the toolkit.
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
コード例 #24
0
                                              data_driven_test)
from cafe.engine.http.client import HTTPClient
from cloudcafe.common.tools.check_dict import get_value
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
    Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

# Name prefix for containers created by the tempurl tests below.
BASE_CONTAINER_NAME = 'tempurl'
CONTENT_TYPE_TEXT = 'text/plain; charset=UTF-8'
# Lifetime in seconds granted to generated temp URLs.
TEMPURL_KEY_LIFE = 20
# Content-Disposition header expected on tempurl downloads;
# "(unknown)" presumably stands in for the object name -- TODO confirm.
EXPECTED_DISPOSITION = ("attachment; filename=\"(unknown)\";"
                        " filename*=UTF-8\'\'(unknown)")

# Data-driven axis: run each tempurl test once per digest algorithm.
sha_type = DatasetList()

sha_type.append_new_dataset("with_sha1", {"sha_type": "sha1"})
sha_type.append_new_dataset("with_sha2", {"sha_type": "sha2"})


@DataDrivenFixture
class TempUrlTest(ObjectStorageFixture):
    def _reset_default_key(self):
        """Reset the account temp-URL key and wait out the key cache.

        Returns the response from setting the key header.
        """
        headers = {'X-Account-Meta-Temp-URL-Key': self.tempurl_key}
        resp = self.client.set_temp_url_key(headers=headers)
        # Give the cluster time to expire any cached copy of the old key.
        time.sleep(self.objectstorage_api_config.tempurl_key_cache_time)
        return resp

    @classmethod
    def setUpClass(cls):
        super(TempUrlTest, cls).setUpClass()
コード例 #25
0
limitations under the License.
"""
from cafe.drivers.unittest.decorators import data_driven_test
from cafe.drivers.unittest.decorators import DataDrivenFixture
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cafe.drivers.unittest.datasets import DatasetList

# Content types a container listing can come back as.
CONTENT_TYPE_TEXT = "text/plain; charset=utf-8"
CONTENT_TYPE_XML = "application/xml; charset=utf-8"
CONTENT_TYPE_JSON = "application/json; charset=utf-8"
CONTAINER_NAME = "list_format_test_container"

# Data-driven axis: request the listing in each format, selected either
# through the Accept header or the "format" query parameter.
data_set_list = DatasetList()

# Default request: no header or param -> plain-text listing.
data_set_list.append_new_dataset("text", {
    "content_type": CONTENT_TYPE_TEXT,
    "headers": {}
})

# JSON selected via the Accept header.
data_set_list.append_new_dataset("json_header", {
    "content_type": CONTENT_TYPE_JSON,
    "headers": {
        "Accept": CONTENT_TYPE_JSON
    }
})

# JSON selected via the "format" query parameter.
data_set_list.append_new_dataset("json_param", {
    "content_type": CONTENT_TYPE_JSON,
    "params": {
        "format": "json"
    }
})
コード例 #26
0
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes


# Oversized payloads used by negative-length tests in this module.
LONG_DESCRIPTION_DATA = 'Long Security Group Test text description' * 10
LONG_NAME_DATA = 'Long Security Group Test text name name name' * 10


# Creating data sets for data driven testing
data_set_list = DatasetList()
# Update only the security group's name.
data_set_list.append_new_dataset(
    name='w_name',
    data_dict={"name": 'test_secgroup_name_update'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
# Update only the description.
data_set_list.append_new_dataset(
    name='w_description',
    data_dict={"description": 'Security Group updated description'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
# Update both name and description together.
data_set_list.append_new_dataset(
    name='w_name_and_description',
    data_dict={"name": 'test_secgroup_name_update',
               "description": 'Security Group updated description'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
# Blank-out both fields; expected_* capture what the API should echo back.
data_set_list.append_new_dataset(
    name='w_blank_name_and_description',
    data_dict={"name": '', "expected_name": '', "description": '',
               "expected_description": '', "use_false_values": True},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
コード例 #27
0
from cafe.drivers.unittest.decorators import DataDrivenFixture, data_driven_test
from cloudroast.networking.networks.fixtures import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants import (
    SecurityGroupsErrorTypes,
    SecurityGroupsResponseCodes,
)


# Oversized payloads used by negative-length tests in this module.
LONG_DESCRIPTION_DATA = "Long Security Group Test text description" * 10
LONG_NAME_DATA = "Long Security Group Test text name name name" * 10


# Creating data sets for data driven testing
data_set_list = DatasetList()
# Create with a plain name.
data_set_list.append_new_dataset(
    name="w_name", data_dict={"name": "test_secgroup_create"}, tags=["sec_group", "post", "positive", "rbac_creator"]
)
# Create with a 40-character name (boundary-length check).
data_set_list.append_new_dataset(
    name="w_long_name",
    data_dict={"name": "1234567890123456789012345678901234567890"},
    tags=["sec_group", "post", "positive", "rbac_creator"],
)
data_set_list.append_new_dataset(
    name="w_none_name_and_description",
    data_dict={
        "name": None,
        "expected_name": "",
        "description": None,
        "expected_description": "",
        "tenant_id": None,
        "use_false_values": True,
コード例 #28
0
from cloudcafe.common.tools import randomstring as randstring
from cloudcafe.objectstorage.objectstorage_api.common.constants import Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

# One dataset per ordered pair of *distinct* formats, so each archive can be
# submitted with every mismatching "archive-format" parameter value.
supported_formats = ["tar", "tar.gz", "tar.bz2"]
archive_formats = DatasetList()
for archive_format in supported_formats:
    for wrong_format in supported_formats:
        if archive_format != wrong_format:
            name = "{}-{}".format(archive_format, wrong_format)
            archive_formats.append_new_dataset(name, {
                "name": name,
                "archive_format": archive_format,
                "wrong_format": wrong_format,
            })


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
    """
    Tests Swfit expand archive operations:
    """

    @classmethod
    def setUpClass(cls):
        super(ExtractArchiveFormatParameterTest, cls).setUpClass()
        cls.default_obj_name = Constants.VALID_OBJECT_NAME
        cls.data_dir = EngineConfig().data_directory
        cls.no_compression = None
コード例 #29
0
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes

# Oversized payloads used by the long-name / long-description cases.
LONG_DESCRIPTION_DATA = 'Long Security Group Test text description' * 10
LONG_NAME_DATA = 'Long Security Group Test text name name name' * 10

# Data sets driving the security-group create tests.
data_set_list = DatasetList()
_CREATE_CASES = (
    ('w_name', {"name": 'test_secgroup_create'}),
    ('w_long_name', {"name": '1234567890123456789012345678901234567890'}),
)
for _ds_name, _ds_data in _CREATE_CASES:
    data_set_list.append_new_dataset(
        name=_ds_name, data_dict=_ds_data,
        tags=['sec_group', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_none_name_and_description',
    data_dict={
        "name": None,
        "expected_name": '',
        "description": None,
        "expected_description": '',
        "tenant_id": None,
        "use_false_values": True
    },
コード例 #30
0
limitations under the License.
"""
from cafe.drivers.unittest.decorators import data_driven_test
from cafe.drivers.unittest.decorators import DataDrivenFixture
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cafe.drivers.unittest.datasets import DatasetList

# MIME types the container-listing format tests expect in the response.
CONTENT_TYPE_TEXT = "text/plain; charset=utf-8"
CONTENT_TYPE_XML = "application/xml; charset=utf-8"
CONTENT_TYPE_JSON = "application/json; charset=utf-8"
CONTAINER_NAME = "list_format_test_container"


# One dataset per listing format, requested either via an Accept header
# or a "format" query parameter; plain text is the server default.
data_set_list = DatasetList()
for _ds_name, _ds_data in (
    ("text",
     {"content_type": CONTENT_TYPE_TEXT, "headers": {}}),
    ("json_header",
     {"content_type": CONTENT_TYPE_JSON,
      "headers": {"Accept": CONTENT_TYPE_JSON}}),
    ("json_param",
     {"content_type": CONTENT_TYPE_JSON, "params": {"format": "json"}}),
    ("xml_header",
     {"content_type": CONTENT_TYPE_XML,
      "headers": {"Accept": CONTENT_TYPE_XML}}),
    ("xml_param",
     {"content_type": CONTENT_TYPE_XML, "params": {"format": "xml"}}),
):
    data_set_list.append_new_dataset(_ds_name, _ds_data)


@DataDrivenFixture
コード例 #31
0
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudcafe.common.tools import randomstring as randstring


BASE_NAME = "extract_archive"
HTTP_OK = 200

# Build every ordered pair of distinct archive formats up front, then emit
# one dataset per pair for the mismatched-format tests.
supported_formats = ['tar', 'tar.gz', 'tar.bz2']
archive_formats = DatasetList()
_format_pairs = [(good, bad)
                 for good in supported_formats
                 for bad in supported_formats
                 if good != bad]
for archive_format, wrong_format in _format_pairs:
    name = '{}-{}'.format(archive_format, wrong_format)
    archive_formats.append_new_dataset(
        name, {'name': name,
               'archive_format': archive_format,
               'wrong_format': wrong_format})


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
    """
    Tests Swfit expand archive operations:
    """
    @classmethod
    def setUpClass(cls):
        super(ExtractArchiveFormatParameterTest, cls).setUpClass()
        cls.default_obj_name = cls.behaviors.VALID_OBJECT_NAME
        cls.data_dir = EngineConfig().data_directory
        cls.no_compression = None
        cls.storage_url = cls.client.storage_url
コード例 #32
0
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from vnc_api import vnc_api
from cloudcafe.networking.networks.config import ContrailConfig
from cloudcafe.networking.networks.extensions.security_groups_api.composites \
    import SecurityGroupsComposite

# Contrail settings supplying the fabric service name/IP/port.
CONF = ContrailConfig()

# Link-local-service data sets: one addresses the fabric service by DNS
# name, the other by IP.  randint() calls are kept in the original order
# so any seeded RNG state produces identical values.
data_set_list_lls = DatasetList()
data_set_list_lls.append_new_dataset(
    name='with_fabric_dns',
    data_dict={
        'lls_name': 'cloudroast-lls-%s' % randint(1, 9),
        'lls_ip': '169.254.%s.%s' % (randint(0, 254), randint(1, 254)),
        'lls_port': int(CONF.fabric_service_port),
        'fabric_port': int(CONF.fabric_service_port),
        'fabric_dns': CONF.fabric_service_name,
    },
    tags=['scenario', 'sdn'])
data_set_list_lls.append_new_dataset(
    name='with_fabric_ip',
    data_dict={
        'lls_name': 'cloudroast-lls-%s' % randint(1, 9),
        'lls_ip': '169.254.%s.%s' % (randint(0, 254), randint(1, 254)),
        'lls_port': int(CONF.fabric_service_port),
        'fabric_port': int(CONF.fabric_service_port),
        'fabric_ip': CONF.fabric_service_ip,
    },
    tags=['scenario', 'sdn'])
コード例 #33
0
"""
import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from cloudcafe.networking.networks.config import NetworkingSecondUserConfig
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes

# Egress-rule data sets; the unnamed first entry omits "protocol" so the
# API default is exercised.
data_set_list = DatasetList()
for _ds_name, _ds_data in (
    ('', {'direction': 'egress'}),
    ('w_protocol_icmp', {'protocol': 'icmp', 'direction': 'egress'}),
    ('w_protocol_tcp', {'protocol': 'tcp', 'direction': 'egress'}),
):
    data_set_list.append_new_dataset(
        name=_ds_name, data_dict=_ds_data,
        tags=['sec_group_egress', 'post', 'positive', 'rbac_creator'])
コード例 #34
0
limitations under the License.
"""
import os

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (
    DataDrivenFixture, data_driven_test)
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools.md5hash import get_md5_hash
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

# One dataset per supported archive format.
archive_formats = DatasetList()
for _fmt in ('tar', 'tar.gz', 'tar.bz2'):
    archive_formats.append_new_dataset(
        _fmt, {'name': _fmt, 'archive_format': _fmt})


@DataDrivenFixture
class ExtractArchiveTest(ObjectStorageFixture):
    """
    Tests Swfit expand archive operations
    Notes:
    The initial response status code is for initial the request.
    The object extraction status code is sent in the body of the
    response.
    """
コード例 #35
0
import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from cloudcafe.networking.networks.config import NetworkingSecondUserConfig
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes


# Security-group-rule create data sets.  The unnamed first entry sends no
# attributes at all; the per-protocol entries carry different tags.
data_set_list = DatasetList()
for _ds_name, _ds_data, _ds_tags in (
    ('', {},
     ['sdn', 'post', 'positive', 'rbac_creator']),
    ('w_protocol_icmp', {'protocol': 'icmp'},
     ['periodic', 'post', 'positive', 'rbac_creator']),
    ('w_protocol_tcp', {'protocol': 'tcp'},
     ['periodic', 'post', 'positive', 'rbac_creator']),
    ('w_protocol_udp', {'protocol': 'udp'},
     ['periodic', 'post', 'positive', 'rbac_creator']),
):
    data_set_list.append_new_dataset(
        name=_ds_name, data_dict=_ds_data, tags=_ds_tags)
data_set_list.append_new_dataset(
    name='w_ethertype_ipv4',
コード例 #36
0
    Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

# Cross every archive format with each *other* format so the extract call
# can be exercised with a deliberately wrong "archive-format" value.
supported_formats = ['tar', 'tar.gz', 'tar.bz2']
archive_formats = DatasetList()
for archive_format in supported_formats:
    for wrong_format in supported_formats:
        if archive_format != wrong_format:
            name = '{}-{}'.format(archive_format, wrong_format)
            archive_formats.append_new_dataset(
                name,
                {'name': name,
                 'archive_format': archive_format,
                 'wrong_format': wrong_format})


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
    """
    Tests Swfit expand archive operations:
    """
    @classmethod
    def setUpClass(cls):
        super(ExtractArchiveFormatParameterTest, cls).setUpClass()
        cls.default_obj_name = Constants.VALID_OBJECT_NAME
        cls.data_dir = EngineConfig().data_directory
        cls.no_compression = None
コード例 #37
0
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (DataDrivenFixture,
                                              data_driven_test)
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools.md5hash import get_md5_hash
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
    Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

# One dataset per supported archive format.
archive_formats = DatasetList()
for _archive_fmt in ('tar', 'tar.gz', 'tar.bz2'):
    archive_formats.append_new_dataset(_archive_fmt, {
        'name': _archive_fmt,
        'archive_format': _archive_fmt,
    })


@DataDrivenFixture
class ExtractArchiveTest(ObjectStorageFixture):
    """
    Tests Swfit expand archive operations