Example #1
    def volume_types(cls,
                     max_datasets=None,
                     randomize=False,
                     model_filter=None,
                     filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all VolumeTypes
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(volume_type_list,
                                                  model_filter=model_filter,
                                                  filter_mode=filter_mode)

        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            data = {
                'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_
            }
            dataset_list.append_new_dataset(vol_type.name, data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
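
A minimal usage sketch for the generator above, assuming the method lives on a
BlockstorageDatasets-style class as in later examples; the 'name' attribute
values are hypothetical:

# Keep only volume types whose model 'name' attribute is 'SATA' or 'SSD',
# cap the result at five datasets, and shuffle it. Filter dicts map model
# attributes to lists of acceptable values, per the docstring.
dataset_list = BlockstorageDatasets.volume_types(
    max_datasets=5,
    randomize=True,
    model_filter={'name': ['SATA', 'SSD']})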
Example #2
    def volume_types_with_restore_control(
            cls,
            max_datasets=None,
            randomize=False,
            model_filter=None,
            filter_mode=BlockstorageDatasets.INCLUSION_MODE):
        """Returns a DatasetList of all VolumeTypes
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(volume_type_list,
                                                  model_filter=model_filter,
                                                  filter_mode=filter_mode)

        dataset_list = DatasetList()
        is_enabled = \
            cls._volumes.config.allow_snapshot_restore_to_different_type
        for vol_type in volume_type_list:
            data = {
                'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_,
                'restore_to_different_type_enabled': is_enabled
            }
            test_name = "{0}_to_other_is_{1}".format(
                vol_type.name, "allowed" if is_enabled else "disabled")
            dataset_list.append_new_dataset(test_name, data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
Example #3
    def images_by_volume_type(cls,
                              max_datasets=None,
                              randomize=False,
                              image_filter=None,
                              volume_type_filter=None):
        """Returns a DatasetList of permuations of Volume Types and Images.
        Requests all available images and volume types from API, and applies
        image_filter and volume_type_filter if provided.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """

        image_list = cls._filter_model_list(cls._images(), image_filter)
        volume_type_list = cls._filter_model_list(cls._volume_types(),
                                                  volume_type_filter)

        # Create dataset from all combinations of all images and volume types
        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            for img in image_list:
                data = {'volume_type': vol_type, 'image': img}
                testname = "{0}_volume_from_{1}_image".format(
                    vol_type.name,
                    str(img.name).replace(" ", "_"))
                dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        if randomize:
            shuffle(dataset_list)

        if max_datasets:
            dataset_list = dataset_list[:max_datasets]

        return dataset_list
Example #4
    def images_by_volume_type(
            cls, max_datasets=None, randomize=False,
            image_filter=None, volume_type_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all combinations of Images and
        Volume Types.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(
            image_list, model_filter=image_filter,
            filter_mode=image_filter_mode)

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(
            volume_type_list, model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # Create dataset from all combinations of all images and volume types
        dataset_list = DatasetList()
        for vtype in volume_type_list:
            for image in image_list:
                data = {'volume_type': vtype,
                        'image': image}
                testname = \
                    "{0}_and_{1}".format(
                        str(vtype.name).replace(" ", "_"),
                        str(image.name).replace(" ", "_"))
                dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
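
The paired *_filter/*_filter_mode arguments control whether a filter keeps or
drops matching models. A hedged sketch, assuming ModelBasedDatasetToolkit also
defines an EXCLUSION_MODE counterpart to INCLUSION_MODE and that the method
lives on a ComputeDatasets-style class (both names hypothetical):

# Inclusion mode (the default): keep only the named images.
datasets = ComputeDatasets.images_by_volume_type(
    image_filter={'name': ['Ubuntu', 'CentOS']},
    image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE)

# Exclusion mode: drop one volume type by its id_ attribute instead.
datasets = ComputeDatasets.images_by_volume_type(
    volume_type_filter={'id_': ['0000-aaaa']},
    volume_type_filter_mode=ModelBasedDatasetToolkit.EXCLUSION_MODE)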
Example #5
    def images_by_flavor(
            cls, max_datasets=None, randomize=False,
            image_filter=None, flavor_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all combinations of Flavors and Images.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(
            image_list, model_filter=image_filter,
            filter_mode=image_filter_mode)

        flavor_list = cls._get_flavors()
        flavor_list = cls._filter_model_list(
            flavor_list, model_filter=flavor_filter,
            filter_mode=flavor_filter_mode)

        dataset_list = DatasetList()
        for image in image_list:
            for flavor in flavor_list:
                data = {'flavor': flavor,
                        'image': image}
                testname = \
                    "image_{0}_and_flavor_{1}".format(
                        str(image.name).replace(" ", "_").replace("/", "-"),
                        str(flavor.name).replace(" ", "_").replace("/", "-"))
                dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
Example #6
    def volume_types(
            cls, max_datasets=None, randomize=None, model_filter=None,
            filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE, tags=None):
        """Returns a DatasetList of all VolumeTypes
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(
            volume_type_list, model_filter=model_filter,
            filter_mode=filter_mode)

        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            data = {'volume_type_name': vol_type.name,
                    'volume_type_id': vol_type.id_}
            dataset_list.append_new_dataset(vol_type.name, data)

        # Apply modifiers
        dataset_list = cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)

        # Apply Tags
        if tags:
            dataset_list.apply_test_tags(*tags)

        return dataset_list
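
Relative to Example #1, this variant also forwards a tags argument to
DatasetList.apply_test_tags after the modifiers run. A hedged usage sketch
(class name and tag values hypothetical):

# Every generated dataset, and therefore every generated test, is tagged
# 'smoke' and 'positive'.
dataset_list = BlockstorageDatasets.volume_types(
    max_datasets=10, randomize=True, tags=['smoke', 'positive'])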
Example #7
    def volume_types_with_restore_control(
            cls, max_datasets=None, randomize=False, model_filter=None,
            filter_mode=BlockstorageDatasets.INCLUSION_MODE):
        """Returns a DatasetList of all VolumeTypes
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values.
        """

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(
            volume_type_list, model_filter=model_filter,
            filter_mode=filter_mode)

        dataset_list = DatasetList()
        is_enabled = \
            cls._volumes.config.allow_snapshot_restore_to_different_type
        for vol_type in volume_type_list:
            data = {'volume_type_name': vol_type.name,
                    'volume_type_id': vol_type.id_,
                    'restore_to_different_type_enabled': is_enabled}
            test_name = "{0}_to_other_is_{1}".format(
                vol_type.name, "allowed" if is_enabled else "disabled")
            dataset_list.append_new_dataset(test_name, data)

        # Apply modifiers
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
Example #8
    def images_by_volume_type(
            cls, max_datasets=None, randomize=False, image_filter=None,
            volume_type_filter=None):
        """Returns a DatasetList of permuations of Volume Types and Images.
        Requests all available images and volume types from API, and applies
        image_filter and volume_type_filter if provided.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """

        image_list = cls._filter_model_list(cls._images(), image_filter)
        volume_type_list = cls._filter_model_list(
            cls._volume_types(), volume_type_filter)

        # Create dataset from all combinations of all images and volume types
        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            for img in image_list:
                data = {'volume_type': vol_type,
                        'image': img}
                testname = "{0}_volume_from_{1}_image".format(
                    vol_type.name,
                    str(img.name).replace(" ", "_"))
                dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        if randomize:
            shuffle(dataset_list)

        if max_datasets:
            dataset_list = dataset_list[:max_datasets]

        return dataset_list
Example #9
 def default_volume_type(cls):
     vol_type = cls.default_volume_type_model()
     dataset = _Dataset(name=vol_type.name,
                        data_dict={
                            'volume_type_name': vol_type.name,
                            'volume_type_id': vol_type.id_
                        })
     dataset_list = DatasetList()
     dataset_list.append(dataset)
     return dataset_list
Example #10
 def default_volume_type(cls):
     vol_type = cls.default_volume_type_model()
     dataset = _Dataset(
         name=vol_type.name,
         data_dict={
             'volume_type_name': vol_type.name,
             'volume_type_id': vol_type.id_})
     dataset_list = DatasetList()
     dataset_list.append(dataset)
     return dataset_list
Example #11
    def volume_types(cls):
        """Returns a DatasetList of Volume Type names and id's"""

        cinder_cli = CinderCLI_Composite()
        volume_type_list = cinder_cli.behaviors.list_volume_types()
        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            data = {'volume_type_name': vol_type.name,
                    'volume_type_id': vol_type.id_}
            dataset_list.append_new_dataset(vol_type.name, data)
        return dataset_list
Example #12
    def flavors_by_images_by_volume_type(
        cls,
        max_datasets=None,
        randomize=None,
        flavor_filter=None,
        volume_type_filter=None,
        image_filter=None,
        flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
    ):
        """Returns a DatasetList of all combinations of Flavors and
        Volume Types.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(image_list,
                                            model_filter=image_filter,
                                            filter_mode=image_filter_mode)

        flavor_list = cls._get_flavors()
        flavor_list = cls._filter_model_list(flavor_list,
                                             model_filter=flavor_filter,
                                             filter_mode=flavor_filter_mode)

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(
            volume_type_list,
            model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # Create dataset from all combinations of all images, flavors, and
        # volume types
        dataset_list = DatasetList()
        for vtype in volume_type_list:
            for flavor in flavor_list:
                for image in image_list:
                    data = {
                        'volume_type': vtype,
                        'flavor': flavor,
                        'image': image
                    }
                    testname = \
                        "{flavor}_{image}_on_{vtype}".format(
                            flavor=str(flavor.name), image=str(image.name),
                            vtype=str(vtype.name)).replace(' ', '_').replace(
                            '.', '_').replace('(', '').replace(')', '')
                    dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
Example #13
    def volume_types(
            cls, max_datasets=None, randomize=False, volume_type_filter=None):
        """Returns a DatasetList of Volume Type names and id's"""

        volume_type_list = cls._filter_model_list(
            cls._volume_types(), volume_type_filter)
        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            data = {'volume_type_name': vol_type.name,
                    'volume_type_id': vol_type.id_}
            dataset_list.append_new_dataset(vol_type.name, data)
        return dataset_list
Example #14
    def volume_types(cls):
        """Returns a DatasetList of Volume Type names and id's"""

        cinder_cli = CinderCLI_Composite()
        volume_type_list = cinder_cli.behaviors.list_volume_types()
        dataset_list = DatasetList()
        for vol_type in volume_type_list:
            data = {
                'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_
            }
            dataset_list.append_new_dataset(vol_type.name, data)
        return dataset_list
Example #15
    def flavors_by_images_by_volume_type(
            cls, max_datasets=None, randomize=None,
            flavor_filter=None, volume_type_filter=None, image_filter=None,
            flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,):
        """Returns a DatasetList of all combinations of Flavors and
        Volume Types.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(
            image_list, model_filter=image_filter,
            filter_mode=image_filter_mode)

        flavor_list = cls._get_flavors()
        flavor_list = cls._filter_model_list(
            flavor_list, model_filter=flavor_filter,
            filter_mode=flavor_filter_mode)

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(
            volume_type_list, model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # Create dataset from all combinations of all images, flavors, and
        # volume types
        dataset_list = DatasetList()
        for vtype in volume_type_list:
            for flavor in flavor_list:
                for image in image_list:
                    data = {'volume_type': vtype,
                            'flavor': flavor,
                            'image': image}
                    testname = \
                        "{flavor}_{image}_on_{vtype}".format(
                            flavor=str(flavor.name), image=str(image.name),
                            vtype=str(vtype.name)).replace(' ', '_').replace(
                            '.', '_').replace('(', '').replace(
                            ')', '').replace('/', '-')
                    dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
Example #16
    def valid_quota_names(cls):

        """Creates a list of expected resource names"""

        quota_test_dataset = DatasetList()

        resources = ["snapshots", "volumes", "gigabytes"]
        vol_types = cls._get_volume_type_names()

        for resource in resources:
            quota_test_dataset.append_new_dataset(resource, {"quota_name": resource})

            for vol_name in vol_types:
                resource_key = "{resource}_{vol_name}".format(resource=resource, vol_name=vol_name)
                quota_test_dataset.append_new_dataset(resource_key, {"quota_name": resource_key})

        return quota_test_dataset
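
An illustration of the names this generator yields, assuming
_get_volume_type_names() returns ['SATA', 'SSD'] (hypothetical values):

# Datasets appended, in order; each carries {"quota_name": <name>}:
#   snapshots, snapshots_SATA, snapshots_SSD,
#   volumes,   volumes_SATA,   volumes_SSD,
#   gigabytes, gigabytes_SATA, gigabytes_SSD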
Example #17
def build_basic_dataset(data_dict, name):
    """
    @summary: Builds a dataset list from a dictionary of key-value pairs

    @param data_dict: URL amendments and values for the dataset list
    @type data_dict: Dictionary
    @param name: Name of the test parameter
    @type name: String

    @return: Dataset_List
    @rtype: DatasetList
    """

    dataset_list = DatasetList()

    for key, value in data_dict.iteritems():
        dataset_list.append_new_dataset(key, {name: value})

    return dataset_list
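
A hedged usage sketch (dictionary contents and parameter name hypothetical):

# One dataset per key; each exposes its value under the 'limit' parameter.
url_params = {'zero': 0, 'small': 10, 'large': 10000}
limit_datasets = build_basic_dataset(url_params, 'limit')
# Yields datasets named 'zero', 'small', and 'large' whose data_dicts are
# {'limit': 0}, {'limit': 10}, and {'limit': 10000} respectively.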
Example #18
def build_basic_dataset(data_dict, name):
    """
    @summary: Builds a dataset list from a dictionary of key-value pairs

    @param data_dict: URL amendments and values for the dataset list
    @type data_dict: Dictionary
    @param name: Name of the test parameter
    @type name: String

    @return: Dataset_List
    @rtype: DatasetList
    """

    dataset_list = DatasetList()

    for key, value in data_dict.iteritems():
        dataset_list.append_new_dataset(key, {name: value})

    return dataset_list
Example #19
    def _modify_dataset_list(
            cls, dataset_list, max_datasets=None, randomize=False):
        """Aggregates common modifiers for dataset lists"""

        if randomize:
            shuffle(dataset_list)

        if max_datasets:
            dataset_list = DatasetList(dataset_list[:max_datasets])

        return dataset_list
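
Note that the shuffle runs before the slice, so combining both modifiers
yields a random sample rather than a shuffled prefix. A hedged sketch (class
and variable names hypothetical):

# Take a random sample of at most three datasets from the full list.
sampled = BlockstorageDatasets._modify_dataset_list(
    full_dataset_list, max_datasets=3, randomize=True)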
Example #20
    def flavors(
            cls, max_datasets=None, randomize=False, model_filter=None,
            filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all Flavors
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        flavor_list = cls._get_flavors()
        flavor_list = cls._filter_model_list(
            flavor_list, model_filter=model_filter, filter_mode=filter_mode)

        dataset_list = DatasetList()
        for flavor in flavor_list:
            data = {'flavor': flavor}
            dataset_list.append_new_dataset(
                str(flavor.name).replace(" ", "_").replace("/", "-"), data)

        # Apply modifiers
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
Example #21
    def images(
            cls, max_datasets=None, randomize=False, model_filter=None,
            filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all Images.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(
            image_list, model_filter=model_filter, filter_mode=filter_mode)

        dataset_list = DatasetList()
        for img in image_list:
            data = {'image': img}
            dataset_list.append_new_dataset(
                str(img.name).replace(" ", "_").replace("/", "-"), data)

        # Apply modifiers
        return cls._modify_dataset_list(
            dataset_list, max_datasets=max_datasets, randomize=randomize)
Example #22
    def images_by_volume_type(
            cls,
            max_datasets=None,
            randomize=False,
            image_filter=None,
            volume_type_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all combinations of Images and
        Volume Types.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(image_list,
                                            model_filter=image_filter,
                                            filter_mode=image_filter_mode)

        volume_type_list = cls._get_volume_types()
        volume_type_list = cls._filter_model_list(
            volume_type_list,
            model_filter=volume_type_filter,
            filter_mode=volume_type_filter_mode)

        # Create dataset from all combinations of all images and volume types
        dataset_list = DatasetList()
        for vtype in volume_type_list:
            for image in image_list:
                data = {'volume_type': vtype, 'image': image}
                testname = \
                    "{0}_and_{1}".format(
                        str(vtype.name).replace(" ", "_"),
                        str(image.name).replace(" ", "_"))
                dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
Example #23
    def images(cls,
               max_datasets=None,
               randomize=False,
               model_filter=None,
               filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all Images.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(image_list,
                                            model_filter=model_filter,
                                            filter_mode=filter_mode)

        dataset_list = DatasetList()
        for img in image_list:
            data = {'image': img}
            dataset_list.append_new_dataset(
                str(img.name).replace(" ", "_").replace("/", "-"), data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
Example #24
    def flavors(cls,
                max_datasets=None,
                randomize=False,
                model_filter=None,
                filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all Flavors
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        flavor_list = cls._get_flavors()
        flavor_list = cls._filter_model_list(flavor_list,
                                             model_filter=model_filter,
                                             filter_mode=filter_mode)

        dataset_list = DatasetList()
        for flavor in flavor_list:
            data = {'flavor': flavor}
            dataset_list.append_new_dataset(
                str(flavor.name).replace(" ", "_").replace("/", "-"), data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
Example #25
    def images_by_flavor(
            cls,
            max_datasets=None,
            randomize=False,
            image_filter=None,
            flavor_filter=None,
            image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
            flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
        """Returns a DatasetList of all combinations of Flavors and Images.
        Filters should be dictionaries with model attributes as keys and
        lists of attributes as key values
        """
        image_list = cls._get_images()
        image_list = cls._filter_model_list(image_list,
                                            model_filter=image_filter,
                                            filter_mode=image_filter_mode)

        flavor_list = cls._get_flavors()
        flavor_list = cls._filter_model_list(flavor_list,
                                             model_filter=flavor_filter,
                                             filter_mode=flavor_filter_mode)

        dataset_list = DatasetList()
        for image in image_list:
            for flavor in flavor_list:
                data = {'flavor': flavor, 'image': image}
                testname = \
                    "image_{0}_and_flavor_{1}".format(
                        str(image.name).replace(" ", "_").replace("/", "-"),
                        str(flavor.name).replace(" ", "_").replace("/", "-"))
                dataset_list.append_new_dataset(testname, data)

        # Apply modifiers
        return cls._modify_dataset_list(dataset_list,
                                        max_datasets=max_datasets,
                                        randomize=randomize)
Example #26
 def decorator(func):
     """Combines and stores DatasetLists in __data_driven_test_data__"""
     dep_message = "DatasetList object required for data_generator"
     combined_lists = kwargs.get("dataset_source") or DatasetList()
     for key, value in kwargs.items():
         if key != "dataset_source" and isinstance(value, DatasetList):
             value.apply_test_tags(key)
         elif not isinstance(value, DatasetList):
             warn(dep_message, DeprecationWarning)
         combined_lists += value
     for dataset_list in dataset_sources:
         if not isinstance(dataset_list, DatasetList):
             warn(dep_message, DeprecationWarning)
         combined_lists += dataset_list
     setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)
     return func
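
A hedged usage sketch, assuming this closure is the inner decorator of the
data_driven_test factory used elsewhere in these examples: DatasetLists
passed by keyword are tagged with their keyword name before being merged
(argument names hypothetical):

# Tests built from smoke_datasets carry the tag 'smoke'; base_datasets,
# passed as dataset_source, is merged untagged.
@data_driven_test(dataset_source=base_datasets, smoke=smoke_datasets)
def ddtest_create_volume(self, **kwargs):
    pass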
Example #27
    def test_skipped_fixture_does_not_raise_EmptyDSLError(self):
        """Ensure a skipped Fixture doesn't generate _FauxDSLFixtures"""

        # Minimal setup required to instantiate a DataDrivenClass
        os.environ['CAFE_ENGINE_CONFIG_FILE_PATH'] = '/tmp'
        os.environ['CAFE_TEST_LOG_PATH'] = '/tmp/log'

        # Define a skipped TestClass with an empty DSL
        @decorators.DataDrivenClass(DatasetList())
        @unittest.skip('This test is skipped')
        class MyTestCase(unittest.TestCase):
            """A dummy test fixture"""

        # Ensure that the DSL_EXCEPTION class was not generated
        module = import_module(MyTestCase.__module__)
        self.assertFalse(
            hasattr(module, 'MyTestCase_DSL_EXCEPTION_0'),
            'EmptyDSLError should not be raised on skipped tests')
Example #28
    def valid_quota_names(cls):
        """Creates a list of expected resource names"""

        quota_test_dataset = DatasetList()

        resources = ["snapshots", "volumes", "gigabytes"]
        vol_types = cls._get_volume_type_names()

        for resource in resources:
            quota_test_dataset.append_new_dataset(resource,
                                                  {"quota_name": resource})

            for vol_name in vol_types:
                resource_key = "{resource}_{vol_name}".format(
                    resource=resource, vol_name=vol_name)
                quota_test_dataset.append_new_dataset(
                    resource_key, {"quota_name": resource_key})

        return quota_test_dataset
Example #29
from random import randint
from cloudroast.networking.networks.fixtures import NetworkingComputeFixture
from cloudroast.networking.networks.scenario.common import ScenarioMixin
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from vnc_api import vnc_api
from cloudcafe.networking.networks.config import ContrailConfig
from cloudcafe.networking.networks.extensions.security_groups_api.composites \
    import SecurityGroupsComposite

CONF = ContrailConfig()

# Creating data sets for data-driven tests
data_set_list_lls = DatasetList()
data_set_list_lls.append_new_dataset(
        name='with_fabric_dns',
        data_dict={'lls_name': 'cloudroast-lls-%s' % randint(1, 9),
                   'lls_ip': '169.254.%s.%s' % (randint(0, 254),
                                                randint(1, 254)),
                   'lls_port': int(CONF.fabric_service_port),
                   'fabric_port': int(CONF.fabric_service_port),
                   'fabric_dns': CONF.fabric_service_name},
        tags=['scenario', 'sdn'])
data_set_list_lls.append_new_dataset(
        name='with_fabric_ip',
        data_dict={'lls_name': 'cloudroast-lls-%s' % randint(1, 9),
                   'lls_ip': '169.254.%s.%s' % (randint(0, 254),
                                                randint(1, 254)),
                   'lls_port': int(CONF.fabric_service_port),
                   'fabric_port': int(CONF.fabric_service_port),
Example #30
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import data_driven_test
from cafe.drivers.unittest.decorators import DataDrivenFixture
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cafe.drivers.unittest.datasets import DatasetList

CONTENT_TYPE_TEXT = "text/plain; charset=utf-8"
CONTENT_TYPE_XML = "application/xml; charset=utf-8"
CONTENT_TYPE_JSON = "application/json; charset=utf-8"
CONTAINER_NAME = "list_format_test_container"

data_set_list = DatasetList()

data_set_list.append_new_dataset("text", {
    "content_type": CONTENT_TYPE_TEXT,
    "headers": {}
})

data_set_list.append_new_dataset("json_header", {
    "content_type": CONTENT_TYPE_JSON,
    "headers": {
        "Accept": CONTENT_TYPE_JSON
    }
})

data_set_list.append_new_dataset("json_param", {
    "content_type": CONTENT_TYPE_JSON,
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, data_driven_test
from cloudroast.networking.networks.fixtures import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants import (
    SecurityGroupsErrorTypes,
    SecurityGroupsResponseCodes,
)


LONG_DESCRIPTION_DATA = "Long Security Group Test text description" * 10
LONG_NAME_DATA = "Long Security Group Test text name name name" * 10


# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
    name="w_name", data_dict={"name": "test_secgroup_create"}, tags=["sec_group", "post", "positive", "rbac_creator"]
)
data_set_list.append_new_dataset(
    name="w_long_name",
    data_dict={"name": "1234567890123456789012345678901234567890"},
    tags=["sec_group", "post", "positive", "rbac_creator"],
)
data_set_list.append_new_dataset(
    name="w_none_name_and_description",
    data_dict={
        "name": None,
        "expected_name": "",
        "description": None,
        "expected_description": "",
"""
import os
import json

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, data_driven_test
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools import randomstring as randstring
from cloudcafe.objectstorage.objectstorage_api.common.constants import Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

supported_formats = ["tar", "tar.gz", "tar.bz2"]
archive_formats = DatasetList()
for archive_format in supported_formats:
    for wrong_format in supported_formats:
        if archive_format == wrong_format:
            continue
        name = "{}-{}".format(archive_format, wrong_format)
        archive_formats.append_new_dataset(
            name, {"name": name, "archive_format": archive_format, "wrong_format": wrong_format}
        )


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
    """
    Tests Swift expand archive operations:
    """
Example #33
limitations under the License.
"""

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes

LONG_DESCRIPTION_DATA = 'Long Security Group Test text description' * 10
LONG_NAME_DATA = 'Long Security Group Test text name name name' * 10

# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
    name='w_name',
    data_dict={"name": 'test_secgroup_create'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_long_name',
    data_dict={"name": '1234567890123456789012345678901234567890'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_none_name_and_description',
    data_dict={
        "name": None,
        "expected_name": '',
        "description": None,
        "expected_description": '',
"""
import os

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (DataDrivenFixture,
                                              data_driven_test)
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools.md5hash import get_md5_hash
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
    Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

archive_formats = DatasetList()
archive_formats.append_new_dataset('tar', {
    'name': 'tar',
    'archive_format': 'tar'
})
archive_formats.append_new_dataset('tar.gz', {
    'name': 'tar.gz',
    'archive_format': 'tar.gz'
})
archive_formats.append_new_dataset('tar.bz2', {
    'name': 'tar.bz2',
    'archive_format': 'tar.bz2'
})


@DataDrivenFixture
Example #35
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes


LONG_DESCRIPTION_DATA = 'Long Security Group Test text description' * 10
LONG_NAME_DATA = 'Long Security Group Test text name name name' * 10


# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
    name='w_name',
    data_dict={"name": 'test_secgroup_name_update'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_description',
    data_dict={"description": 'Security Group updated description'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_name_and_description',
    data_dict={"name": 'test_secgroup_name_update',
               "description": 'Security Group updated description'},
    tags=['sec_group', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_blank_name_and_description',
Example #36
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from cloudcafe.networking.networks.config import NetworkingSecondUserConfig
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes

# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
    name='',
    data_dict={'direction': 'egress'},
    tags=['sec_group_egress', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_icmp',
    data_dict={
        'protocol': 'icmp',
        'direction': 'egress'
    },
    tags=['sec_group_egress', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_tcp',
    data_dict={
        'protocol': 'tcp',
Example #37
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import data_driven_test
from cafe.drivers.unittest.decorators import DataDrivenFixture
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cafe.drivers.unittest.datasets import DatasetList

CONTENT_TYPE_TEXT = "text/plain; charset=utf-8"
CONTENT_TYPE_XML = "application/xml; charset=utf-8"
CONTENT_TYPE_JSON = "application/json; charset=utf-8"
CONTAINER_NAME = "list_format_test_container"


data_set_list = DatasetList()

data_set_list.append_new_dataset("text", {"content_type": CONTENT_TYPE_TEXT, "headers": {}})

data_set_list.append_new_dataset(
    "json_header", {"content_type": CONTENT_TYPE_JSON, "headers": {"Accept": CONTENT_TYPE_JSON}}
)

data_set_list.append_new_dataset("json_param", {"content_type": CONTENT_TYPE_JSON, "params": {"format": "json"}})

data_set_list.append_new_dataset(
    "xml_header", {"content_type": CONTENT_TYPE_XML, "headers": {"Accept": CONTENT_TYPE_XML}}
)

data_set_list.append_new_dataset("xml_param", {"content_type": CONTENT_TYPE_XML, "params": {"format": "xml"}})
Example #38
import time

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (DataDrivenFixture,
                                              data_driven_test)
from cafe.engine.http.client import HTTPClient
from cloudcafe.common.tools.check_dict import get_value
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
    Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_CONTAINER_NAME = 'tempurl'
CONTENT_TYPE_TEXT = 'text/plain; charset=UTF-8'
TEMPURL_KEY_LIFE = 20
EXPECTED_DISPOSITION = ("attachment; filename=\"{filename}\";"
                        " filename*=UTF-8\'\'{filename}")

sha_type = DatasetList()

sha_type.append_new_dataset("with_sha1", {"sha_type": "sha1"})
sha_type.append_new_dataset("with_sha2", {"sha_type": "sha2"})


@DataDrivenFixture
class TempUrlTest(ObjectStorageFixture):
    def _reset_default_key(self):
        response = None
        headers = {'X-Account-Meta-Temp-URL-Key': self.tempurl_key}
        response = self.client.set_temp_url_key(headers=headers)
        time.sleep(self.objectstorage_api_config.tempurl_key_cache_time)
        return response

    @classmethod
Example #39
See the License for the specific language governing permissions and
limitations under the License.
"""
import os

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (
    DataDrivenFixture, data_driven_test)
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools.md5hash import get_md5_hash
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

archive_formats = DatasetList()
archive_formats.append_new_dataset(
    'tar', {'name': 'tar', 'archive_format': 'tar'})
archive_formats.append_new_dataset(
    'tar.gz', {'name': 'tar.gz', 'archive_format': 'tar.gz'})
archive_formats.append_new_dataset(
    'tar.bz2', {'name': 'tar.bz2', 'archive_format': 'tar.bz2'})


@DataDrivenFixture
class ExtractArchiveTest(ObjectStorageFixture):
    """
    Tests Swift expand archive operations
    Notes:
    The initial response status code is for the initial request.
    The object extraction status code is sent in the body of the
Example #40
import os
import json

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (
    DataDrivenFixture, data_driven_test)
from cafe.engine.config import EngineConfig
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudcafe.common.tools import randomstring as randstring


BASE_NAME = "extract_archive"
HTTP_OK = 200

supported_formats = ['tar', 'tar.gz', 'tar.bz2']
archive_formats = DatasetList()
for archive_format in supported_formats:
    for wrong_format in supported_formats:
        if archive_format == wrong_format:
            continue
        name = '{}-{}'.format(archive_format, wrong_format)
        archive_formats.append_new_dataset(
            name, {'name': name,
                   'archive_format': archive_format,
                   'wrong_format': wrong_format})


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
    """
    Tests Swift expand archive operations:
Example #41
limitations under the License.
"""
import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from cloudcafe.networking.networks.config import NetworkingSecondUserConfig
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes


# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
    name='',
    data_dict={},
    tags=['sdn', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_icmp',
    data_dict={'protocol': 'icmp'},
    tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_tcp',
    data_dict={'protocol': 'tcp'},
    tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_udp',
    data_dict={'protocol': 'udp'},
Example #42
import json

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (DataDrivenFixture,
                                              data_driven_test)
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools import randomstring as randstring
from cloudcafe.objectstorage.objectstorage_api.common.constants import \
    Constants
from cloudroast.objectstorage.fixtures import ObjectStorageFixture

BASE_NAME = "extract_archive"
HTTP_OK = 200

supported_formats = ['tar', 'tar.gz', 'tar.bz2']
archive_formats = DatasetList()
for archive_format in supported_formats:
    for wrong_format in supported_formats:
        if archive_format == wrong_format:
            continue
        name = '{}-{}'.format(archive_format, wrong_format)
        archive_formats.append_new_dataset(
            name, {
                'name': name,
                'archive_format': archive_format,
                'wrong_format': wrong_format
            })


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
Example #43
class ExtractArchiveTest(ObjectStorageFixture):
    """
    Tests Swift expand archive operations
    Notes:
    The initial response status code is for the initial request.
    The object extraction status code is sent in the body of the
    response.
    """
    @classmethod
    def setUpClass(cls):
        super(ExtractArchiveTest, cls).setUpClass()
        cls.default_obj_name = Constants.VALID_OBJECT_NAME
        cls.data_dir = EngineConfig().data_directory
        cls.no_compression = None
        cls.storage_url = cls.client.storage_url
        cls.archive_paths = {}
        cls.obj_names = []
        cls.obj_names_with_slashes = []
        cls.obj_names_without_slashes = []

        cls.num_archive_files = 20
        for num in range(cls.num_archive_files):
            if num < 10:
                cls.obj_names_with_slashes.append(
                    "{0}_test{1}/{0}_obj_{1}".format(BASE_NAME, num))
            else:
                cls.obj_names_without_slashes.append("{0}_obj_{1}".format(
                    BASE_NAME, num))

        cls.obj_names = \
            cls.obj_names_with_slashes + cls.obj_names_without_slashes

        tar_archive = cls.client.create_archive(cls.obj_names, None)
        cls.archive_paths["tar"] = tar_archive

        gz_archive = cls.client.create_archive(cls.obj_names, "gz")
        cls.archive_paths["tar.gz"] = gz_archive

        bz2_archive = cls.client.create_archive(cls.obj_names, "bz2")
        cls.archive_paths["tar.bz2"] = bz2_archive

    @classmethod
    def tearDownClass(cls):
        super(ExtractArchiveTest, cls).tearDownClass()
        for key in cls.archive_paths.keys():
            os.remove(cls.archive_paths[key])

    def read_archive_data(self, archive_path):
        # Read the archive contents into memory for upload
        with open(archive_path, 'r') as archive_file:
            return archive_file.read()

    @data_driven_test(DatasetList(archive_formats))
    @ObjectStorageFixture.required_features('bulk_upload')
    def ddtest_extract_archive_to_existing_container(self, archive_format,
                                                     **kwargs):
        """
        Scenario: upload an archive with the extract-archive query string
        parameter

        Precondition: Container exists

        Expected Results: archive is extracted to objects in an
        existing container

        @param archive_format: Type of archive file to upload.
        @type  archive_format: string
        """
        container_name = self.create_temp_container(descriptor=BASE_NAME)

        data = self.read_archive_data(self.archive_paths[archive_format])

        headers = {'Accept': 'application/json'}

        response = self.client.create_archive_object(
            data, archive_format, upload_path=container_name, headers=headers)

        expected = HTTP_OK
        received = response.status_code
        self.assertEqual(
            expected, received,
            "extract tar archive expected successful status code: {0}"
            " received: {1}".format(expected, received))

        # inspect the body of the response
        expected = self.num_archive_files
        received = int(response.entity.num_files_created)
        self.assertEqual(
            expected,
            received,
            msg="response body 'Number Files Created' expected: {0}"
            " received {1}".format(expected, received))

        expected = '201 Created'
        received = response.entity.status
        self.assertEqual(expected,
                         received,
                         msg="response body 'Response Status' expected: {0}"
                         " received {1}".format(expected, received))

        expected = 0
        received = len(response.entity.errors)
        self.assertEqual(
            expected,
            received,
            msg="response body 'Errors' expected None received {0}".format(
                response.entity.errors))

        # check the actual number of objects and object names
        params = {'format': 'json'}
        response_objects = self.behaviors.list_objects(
            container_name, params=params, expected_objects=self.obj_names)

        expected = self.num_archive_files
        received = len(response_objects)
        self.assertEqual(expected,
                         received,
                         msg="container list expected: {0} extracted objects."
                         " received: {1} extracted objects".format(
                             expected, received))

        # check that all the objects were extracted to the existing container
        resp_obj_names = [obj.name for obj in response_objects]

        self.assertEqual(sorted(self.obj_names), sorted(resp_obj_names))

        # check that the content of the obj is correct
        # the content should be the md5hash of the objects name
        for obj_name in resp_obj_names:
            # the content of the obj should be the md5 sum of the obj name
            response = self.client.get_object(container_name, obj_name)

            expected = get_md5_hash(obj_name)
            received = response.content
            self.assertEqual(
                expected,
                received,
                msg="obj content expected: {0} received: {1}".format(
                    expected, received))

    @data_driven_test(DatasetList(archive_formats))
    @ObjectStorageFixture.required_features('bulk_upload')
    def ddtest_extract_archive_without_existing_container(
            self, archive_format, **kwargs):
        """
        Scenario: upload an archived file with the extract-archive query string
        parameter

        Precondition: Container does not exist

        Expected Results: archive members with names containing slashes are
        extracted to objects; names without slashes are ignored

        @param archive_format: Type of archive file to upload.
        @type  archive_format: string
        """
        expected_listings = {}
        for name in self.obj_names_with_slashes:
            container_name, object_name = name.split('/', 1)
            if container_name not in expected_listings:
                expected_listings[container_name] = []
            expected_listings[container_name].append(object_name)
        expected_containers = list(expected_listings.iterkeys())
        self.addCleanup(self.behaviors.force_delete_containers,
                        expected_containers)

        data = self.read_archive_data(self.archive_paths[archive_format])

        headers = {'Accept': 'application/json'}

        response = self.client.create_archive_object(data,
                                                     archive_format,
                                                     headers=headers)

        expected = HTTP_OK
        received = response.status_code
        self.assertEqual(
            expected, received,
            "extract tar archive expected successful status code: {0}"
            " received: {1}".format(expected, received))

        # inspect the body of the response
        expected = len(self.obj_names_with_slashes)
        received = int(response.entity.num_files_created)
        self.assertEqual(
            expected,
            received,
            msg="response body 'Number Files Created' expected: {0}"
            " received {1}".format(expected, received))

        expected = '201 Created'
        received = response.entity.status
        self.assertEqual(expected,
                         received,
                         msg="response body 'Response Status' expected: {0}"
                         " received {1}".format(expected, received))

        expected = 0
        received = len(response.entity.errors)
        self.assertEqual(
            expected,
            received,
            msg="response body 'Errors' expected None received {0}".format(
                response.entity.errors))

        # check the actual number of objects and object names
        params = {'format': 'json', 'marker': BASE_NAME}

        response = self.behaviors.list_containers(
            params=params, expected_containers=expected_containers)

        resp_container_names = [container.name for container in response]

        # archive object names without slashes are ignored
        for name in self.obj_names_without_slashes:
            self.assertNotIn(name, resp_container_names)

        # names with slashes should create containers with objects in them
        for container_name in expected_containers:
            """
            an archive named foo/bar will create a container named 'foo'
            with an object named 'bar' in it.
            """
            expected_objects = expected_listings.get(container_name)

            # check to see if the expected container name is in the container
            # list response
            self.assertTrue(container_name in resp_container_names)

            # check to see if the expected number of objects and obj name are
            # in the obj list response
            params = {'format': 'json'}
            response_objects = self.behaviors.list_objects(
                container_name,
                params=params,
                expected_objects=expected_objects)

            resp_obj_names = [obj.name for obj in response_objects]

            expected = len(expected_objects)
            received = len(resp_obj_names)
            self.assertEqual(
                expected,
                received,
                msg="container list expected: {0} extracted objects."
                " received: {1} extracted objects".format(expected, received))

            for object_name in expected_objects:
                self.assertIn(object_name, resp_obj_names)

                # the content of the obj should be the md5 sum of the obj name
                response = self.client.get_object(container_name, object_name)

                expected = get_md5_hash('{}/{}'.format(container_name,
                                                       object_name))
                received = response.content
                self.assertEqual(
                    expected,
                    received,
                    msg="obj content expected: {0} received: {1}".format(
                        expected, received))

    @data_driven_test(DatasetList(archive_formats))
    @ObjectStorageFixture.required_features('bulk_upload')
    def ddtest_object_creation_with_archive(self, archive_format, **kwargs):
        """
        Scenario: archive file is uploaded without the extract-archive query
        string parameter.

        Expected Results: contents of the archive are not expanded into objects

        @param archive_format: Type of archive file to upload.
        @type  archive_format: string
        """
        container_name = self.create_temp_container(descriptor=BASE_NAME)

        object_data = self.read_archive_data(
            self.archive_paths[archive_format])
        headers = {'Content-Length': str(len(object_data))}
        obj_name = "{0}_{1}".format(BASE_NAME, self.default_obj_name)
        response = self.client.create_object(container_name,
                                             obj_name,
                                             data=object_data,
                                             headers=headers)

        expected = 201
        received = response.status_code
        self.assertEqual(
            expected, received,
            "object creation with tar archive expected successful status"
            " code: {0} received: {1}".format(expected, received))

        params = {'format': 'json'}
        response = self.client.list_objects(container_name, params=params)

        expected = 1
        received = len(response.entity)
        self.assertEqual(expected,
                         received,
                         msg="container list expected: {0} objects."
                         " received: {1} objects".format(expected, received))

        resp_obj = response.entity[0]

        self.assertEqual(obj_name, resp_obj.name)

        response = self.client.get_object(container_name, obj_name)

        self.assertGreater(
            int(response.headers.get('content-length')), 0)