Example #1
def data(TEST):
    TEST.cinder_services = utils.TestDataContainer()
    TEST.cinder_volumes = utils.TestDataContainer()
    TEST.cinder_volume_backups = utils.TestDataContainer()
    TEST.cinder_volume_encryption_types = utils.TestDataContainer()
    TEST.cinder_volume_types = utils.TestDataContainer()
    TEST.cinder_volume_encryption = utils.TestDataContainer()
    TEST.cinder_bootable_volumes = utils.TestDataContainer()
    TEST.cinder_qos_specs = utils.TestDataContainer()
    TEST.cinder_qos_spec_associations = utils.TestDataContainer()
    TEST.cinder_volume_snapshots = utils.TestDataContainer()
    TEST.cinder_quotas = utils.TestDataContainer()
    TEST.cinder_quota_usages = utils.TestDataContainer()
    TEST.cinder_availability_zones = utils.TestDataContainer()
    TEST.cinder_volume_transfers = utils.TestDataContainer()
    TEST.cinder_pools = utils.TestDataContainer()
    TEST.cinder_consistencygroups = utils.TestDataContainer()
    TEST.cinder_cgroup_volumes = utils.TestDataContainer()
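    # Each attribute above starts as an empty TestDataContainer that the rest
    # of this fixture populates; tests later read the objects back (e.g.
    # first(), list()).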

    # Services
    service_1 = services.Service(
        services.ServiceManager(None), {
            "service": "cinder-scheduler",
            "status": "enabled",
            "binary": "cinder-scheduler",
            "zone": "internal",
            "state": "up",
            "updated_at": "2013-07-08T05:21:00.000000",
            "host": "devstack001",
            "disabled_reason": None
        })

    service_2 = services.Service(
        services.ServiceManager(None), {
            "service": "cinder-volume",
            "status": "enabled",
            "binary": "cinder-volume",
            "zone": "nova",
            "state": "up",
            "updated_at": "2013-07-08T05:20:51.000000",
            "host": "devstack001",
            "disabled_reason": None
        })
    TEST.cinder_services.add(service_1)
    TEST.cinder_services.add(service_2)

    # Volumes - Cinder v1
    volume = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
            'status': 'available',
            'size': 40,
            'display_name': 'Volume name',
            'display_description': 'Volume description',
            'created_at': '2014-01-27 10:30:00',
            'volume_type': None,
            'attachments': []
        })
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            "id": "4b069dd0-6eaa-4272-8abc-5448a68f1cce",
            "status": 'available',
            "size": 10,
            "display_name": '',
            "display_description": '',
            "device": "/dev/hda",
            "created_at": '2010-11-21 18:34:25',
            "volume_type": 'vol_type_1',
            "attachments": []
        })
    other_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "21023e92-8008-1234-8059-7f2293ff3889",
            'status': 'in-use',
            'size': 10,
            'display_name': u'my_volume',
            'display_description': '',
            'created_at': '2013-04-01 10:30:00',
            'volume_type': None,
            'attachments': [{
                "id": "1",
                "server_id": '1',
                "device": "/dev/hda"
            }]
        })
    volume_with_type = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "7dcb47fd-07d9-42c2-9647-be5eab799ebe",
            'name': 'my_volume2',
            'status': 'in-use',
            'size': 10,
            'display_name': u'my_volume2',
            'display_description': '',
            'created_at': '2013-04-01 10:30:00',
            'volume_type': 'vol_type_2',
            'attachments': [{
                "id": "2",
                "server_id": '2',
                "device": "/dev/hdb"
            }]
        })
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "21023e92-8008-1234-8059-7f2293ff3890",
            'status': 'in-use',
            'size': 10,
            'display_name': u'my_volume',
            'display_description': '',
            'created_at': '2013-04-01 10:30:00',
            'volume_type': None,
            'bootable': False,
            'attachments': [{
                "id": "1",
                "server_id": '1',
                "device": "/dev/hda"
            }]
        })

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    other_volume.bootable = 'true'

    TEST.cinder_volumes.add(api.cinder.Volume(volume))
    TEST.cinder_volumes.add(api.cinder.Volume(nameless_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(other_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(volume_with_type))

    TEST.cinder_bootable_volumes.add(api.cinder.Volume(non_bootable_volume))
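    # Note that the only entry in cinder_bootable_volumes is the volume flagged
    # bootable=False, presumably to exercise filtering on the bootable flag.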

    vol_type1 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None), {
            'id': u'1',
            'name': u'vol_type_1',
            'description': 'type 1 description',
            'extra_specs': {
                'foo': 'bar'
            }
        })
    vol_type2 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None), {
            'id': u'2',
            'name': u'vol_type_2',
            'description': 'type 2 description'
        })
    TEST.cinder_volume_types.add(vol_type1, vol_type2)

    # Volumes - Cinder v2
    volume_v2 = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "31023e92-8008-4c8b-8059-7f2293ff1234",
            'name': 'v2_volume',
            'description': "v2 Volume Description",
            'status': 'available',
            'size': 20,
            'created_at': '2014-01-27 10:30:00',
            'volume_type': None,
            'os-vol-host-attr:host': 'host@backend-name#pool',
            'bootable': 'true',
            'attachments': []
        })
    volume_v2.bootable = 'true'

    TEST.cinder_volumes.add(api.cinder.Volume(volume_v2))

    snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
            'display_name': 'test snapshot',
            'display_description': 'volume snapshot',
            'size': 40,
            'status': 'available',
            'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'
        })
    snapshot2 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0f',
            'name': '',
            'description': 'v2 volume snapshot description',
            'size': 80,
            'status': 'available',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })
    snapshot3 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0e',
            'name': '',
            'description': 'v2 volume snapshot description 2',
            'size': 80,
            'status': 'available',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })

    snapshot.bootable = 'true'
    snapshot2.bootable = 'true'

    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot2))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot3))
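    # Attach the source volume to the first snapshot via its private _volume
    # attribute, presumably so tests can resolve the snapshot's source volume
    # without an extra API lookup.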
    TEST.cinder_volume_snapshots.first()._volume = volume

    # Volume Type Encryption
    vol_enc_type1 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {
            'volume_type_id': u'1',
            'control_location': "front-end",
            'key_size': 512,
            'provider': "a-provider",
            'cipher': "a-cipher"
        })
    vol_enc_type2 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {
            'volume_type_id': u'2',
            'control_location': "front-end",
            'key_size': 256,
            'provider': "a-provider",
            'cipher': "a-cipher"
        })
    vol_unenc_type1 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {})
    TEST.cinder_volume_encryption_types.add(vol_enc_type1, vol_enc_type2,
                                            vol_unenc_type1)

    volume_backup1 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None), {
            'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
            'name': 'backup1',
            'description': 'volume backup 1',
            'size': 10,
            'status': 'available',
            'container_name': 'volumebackups',
            'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'
        })

    volume_backup2 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None), {
            'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e52',
            'name': 'backup2',
            'description': 'volume backup 2',
            'size': 20,
            'status': 'available',
            'container_name': 'volumebackups',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })

    volume_backup3 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None), {
            'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e53',
            'name': 'backup3',
            'description': 'volume backup 3',
            'size': 20,
            'status': 'available',
            'container_name': 'volumebackups',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })

    TEST.cinder_volume_backups.add(volume_backup1)
    TEST.cinder_volume_backups.add(volume_backup2)
    TEST.cinder_volume_backups.add(volume_backup3)

    # Volume Encryption
    vol_enc_metadata1 = volumes.Volume(
        volumes.VolumeManager(None), {
            'cipher': 'test-cipher',
            'key_size': 512,
            'provider': 'test-provider',
            'control_location': 'front-end'
        })
    vol_unenc_metadata1 = volumes.Volume(volumes.VolumeManager(None), {})
    TEST.cinder_volume_encryption.add(vol_enc_metadata1)
    TEST.cinder_volume_encryption.add(vol_unenc_metadata1)

    # Quota Sets
    quota_data = dict(volumes='1', snapshots='1', gigabytes='1000')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.cinder_quotas.add(api.base.QuotaSet(quota))

    # Quota Usages
    quota_usage_data = {
        'gigabytes': {
            'used': 0,
            'quota': 1000
        },
        'instances': {
            'used': 0,
            'quota': 10
        },
        'snapshots': {
            'used': 0,
            'quota': 10
        }
    }
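    # Build a QuotaUsage object: add_quota() registers each limit and tally()
    # records the amount currently in use.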
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(api.base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.cinder_quota_usages.add(quota_usage)

    # Availability Zones
    # Cinder returns the following structure from os-availability-zone
    # {"availabilityZoneInfo":
    # [{"zoneState": {"available": true}, "zoneName": "nova"}]}
    # Note that the default zone is still "nova" even though this is cinder
    TEST.cinder_availability_zones.add(
        availability_zones.AvailabilityZone(
            availability_zones.AvailabilityZoneManager(None), {
                'zoneName': 'nova',
                'zoneState': {
                    'available': True
                }
            }))
    # Cinder Limits
    limits = {
        "absolute": {
            "totalVolumesUsed": 1,
            "totalGigabytesUsed": 5,
            "maxTotalVolumeGigabytes": 1000,
            "maxTotalVolumes": 10
        }
    }
    TEST.cinder_limits = limits

    # QOS Specs
    qos_spec1 = qos_specs.QoSSpecs(
        qos_specs.QoSSpecsManager(None), {
            "id": "418db45d-6992-4674-b226-80aacad2073c",
            "name": "high_iops",
            "consumer": "back-end",
            "specs": {
                "minIOPS": "1000",
                "maxIOPS": '100000'
            }
        })
    qos_spec2 = qos_specs.QoSSpecs(
        qos_specs.QoSSpecsManager(None), {
            "id": "6ed7035f-992e-4075-8ed6-6eff19b3192d",
            "name": "high_bws",
            "consumer": "back-end",
            "specs": {
                "maxBWS": '5000'
            }
        })

    TEST.cinder_qos_specs.add(qos_spec1, qos_spec2)
    vol_type1.associated_qos_spec = qos_spec1.name
    TEST.cinder_qos_spec_associations.add(vol_type1)

    # volume_transfers
    transfer_1 = volume_transfers.VolumeTransfer(
        volume_transfers.VolumeTransferManager(None), {
            'id': '99999999-8888-7777-6666-555555555555',
            'name': 'test transfer',
            'volume_id': volume.id,
            'auth_key': 'blah',
            'created_at': ''
        })
    TEST.cinder_volume_transfers.add(transfer_1)

    # Pools
    pool1 = pools.Pool(
        pools.PoolManager(None), {
            "QoS_support": False,
            "allocated_capacity_gb": 0,
            "driver_version": "3.0.0",
            "free_capacity_gb": 10,
            "extra_specs": {
                "description": "LVM Extra specs",
                "display_name": "LVMDriver",
                "namespace": "OS::Cinder::LVMDriver",
                "type": "object",
            },
            "name": "devstack@lvmdriver-1#lvmdriver-1",
            "pool_name": "lvmdriver-1",
            "reserved_percentage": 0,
            "storage_protocol": "iSCSI",
            "total_capacity_gb": 10,
            "vendor_name": "Open Source",
            "volume_backend_name": "lvmdriver-1"
        })

    pool2 = pools.Pool(
        pools.PoolManager(None), {
            "QoS_support": False,
            "allocated_capacity_gb": 2,
            "driver_version": "3.0.0",
            "free_capacity_gb": 15,
            "extra_specs": {
                "description": "LVM Extra specs",
                "display_name": "LVMDriver",
                "namespace": "OS::Cinder::LVMDriver",
                "type": "object",
            },
            "name": "devstack@lvmdriver-2#lvmdriver-2",
            "pool_name": "lvmdriver-2",
            "reserved_percentage": 0,
            "storage_protocol": "iSCSI",
            "total_capacity_gb": 10,
            "vendor_name": "Open Source",
            "volume_backend_name": "lvmdriver-2"
        })

    TEST.cinder_pools.add(pool1)
    TEST.cinder_pools.add(pool2)

    # volume consistency groups
    cgroup_1 = consistencygroups.Consistencygroup(
        consistencygroups.ConsistencygroupManager(None), {
            'id': u'1',
            'name': u'cg_1',
            'description': 'cg 1 description',
            'volume_types': u'1',
            'volume_type_names': []
        })

    cgroup_2 = consistencygroups.Consistencygroup(
        consistencygroups.ConsistencygroupManager(None), {
            'id': u'2',
            'name': u'cg_2',
            'description': 'cg 2 description',
            'volume_types': u'1',
            'volume_type_names': []
        })

    TEST.cinder_consistencygroups.add(cgroup_1)
    TEST.cinder_consistencygroups.add(cgroup_2)

    volume_for_consistency_group = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "11023e92-8008-4c8b-8059-7f2293ff3881",
            'status': 'available',
            'size': 40,
            'display_name': 'Volume name',
            'display_description': 'Volume description',
            'created_at': '2014-01-27 10:30:00',
            'volume_type': None,
            'attachments': [],
            'consistencygroup_id': u'1'
        })
    TEST.cinder_cgroup_volumes.add(
        api.cinder.Volume(volume_for_consistency_group))
Example #2
def data(TEST):

    # OvercloudPlan
    TEST.tuskarclient_plans = test_data_utils.TestDataContainer()
    plan_1 = {
        'id': 'plan-1',
        'name': 'overcloud',
        'description': 'this is an overcloud deployment plan',
        'template': '',
        'created_at': '2014-05-27T21:11:09Z',
        'modified_at': '2014-05-30T21:11:09Z',
        'roles': [{
            'id': 'role-1',
            'name': 'Controller',
            'version': 1,
        }, {
            'id': 'role-2',
            'name': 'Compute',
            'version': 1,
        }, {
            'id': 'role-3',
            'name': 'Object Storage',
            'version': 1,
        }, {
            'id': 'role-4',
            'name': 'Block Storage',
            'version': 1,
        }],
        'parameters': [{
            'name': 'ControllerNodeCount',
            'label': 'Controller Node Count',
            'description': 'Controller node count',
            'hidden': 'false',
            'value': 1,
        }, {
            'name': 'ComputeNodeCount',
            'label': 'Compute Node Count',
            'description': 'Compute node count',
            'hidden': 'false',
            'value': 42,
        }, {
            'name': 'Block StorageNodeCount',
            'label': 'Block Storage Node Count',
            'description': 'Block storage node count',
            'hidden': 'false',
            'value': 5,
        }, {
            'name': 'ControllerFlavorID',
            'label': 'Controller Flavor ID',
            'description': 'Controller flavor ID',
            'hidden': 'false',
            'value': '1',
        }, {
            'name': 'ComputeFlavorID',
            'label': 'Compute Flavor ID',
            'description': 'Compute flavor ID',
            'hidden': 'false',
            'value': '1',
        }, {
            'name': 'Block StorageFlavorID',
            'label': 'Block Storage Flavor ID',
            'description': 'Block storage flavor ID',
            'hidden': 'false',
            'value': '2',
        }, {
            'name': 'ControllerImageID',
            'label': 'Controller Image ID',
            'description': 'Controller image ID',
            'hidden': 'false',
            'value': '2',
        }, {
            'name': 'ComputeImageID',
            'label': 'Compute Image ID',
            'description': 'Compute image ID',
            'hidden': 'false',
            'value': '1',
        }, {
            'name': 'Block StorageImageID',
            'label': 'Block Storage Image ID',
            'description': 'Block storage image ID',
            'hidden': 'false',
            'value': '4',
        }, {
            'name': 'controller_NovaInterfaces',
            'parameter_group': 'Nova',
            'type': 'String',
            'description': '',
            'no_echo': 'false',
            'default': 'eth0',
        }, {
            'name': 'controller_NeutronInterfaces',
            'parameter_group': 'Neutron',
            'type': 'String',
            'description': '',
            'no_echo': 'false',
            'default': 'eth0',
        }, {
            'name': 'compute_KeystoneHost',
            'parameter_group': 'Keystone',
            'type': 'String',
            'description': '',
            'no_echo': 'false',
            'default': '',
        }, {
            'name': 'object_storage_SwiftHashSuffix',
            'parameter_group': 'Swift',
            'type': 'String',
            'description': '',
            'no_echo': 'true',
            'default': '',
        }, {
            'name': 'block_storage_NeutronNetworkType',
            'parameter_group': 'Neutron',
            'type': 'String',
            'description': '',
            'no_echo': 'false',
            'default': 'gre',
        }, {
            'name': 'AdminPassword',
            'label': 'Admin Password',
            'description': 'Admin password',
            'hidden': 'false',
            'value': 'unset',
        }],
    }
    TEST.tuskarclient_plans.add(plan_1)

    # OvercloudRole
    TEST.tuskarclient_roles = test_data_utils.TestDataContainer()
    r_1 = {
        'id': 'role-1',
        'name': 'Controller',
        'version': 1,
        'description': 'controller role',
        'created_at': '2014-05-27T21:11:09Z'
    }
    r_2 = {
        'id': 'role-2',
        'name': 'Compute',
        'version': 1,
        'description': 'compute role',
        'created_at': '2014-05-27T21:11:09Z'
    }
    r_3 = {
        'id': 'role-3',
        'name': 'Object Storage',
        'version': 1,
        'description': 'object storage role',
        'created_at': '2014-05-27T21:11:09Z'
    }
    r_4 = {
        'id': 'role-4',
        'name': 'Block Storage',
        'version': 1,
        'description': 'block storage role',
        'created_at': '2014-05-27T21:11:09Z'
    }
    TEST.tuskarclient_roles.add(r_1, r_2, r_3, r_4)
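As an illustration only, a hypothetical helper (not part of the fixture) showing how a test might look up one of plan_1's parameters by name:

def get_plan_parameter(plan, name):
    # Return the first parameter dict whose 'name' matches, or None.
    return next((p for p in plan['parameters'] if p['name'] == name), None)

# e.g. get_plan_parameter(plan_1, 'ComputeNodeCount')['value'] == 42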
Example #3
def data(TEST):
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.volume_snapshots = utils.TestDataContainer()
    TEST.volume_types = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()

    # Data returned by novaclient.
    # It is used when the API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        dict(id="41023e92-8008-4c8b-8059-7f2293ff3775",
             name='test_volume',
             status='available',
             size=40,
             display_name='Volume name',
             created_at='2012-04-01 10:30:00',
             volume_type=None,
             attachments=[]))
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        dict(id="3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
             name='',
             status='in-use',
             size=10,
             display_name='',
             display_description='',
             device="/dev/hda",
             created_at='2010-11-21 18:34:25',
             volume_type='vol_type_1',
             attachments=[{
                 "id": "1",
                 "server_id": '1',
                 "device": "/dev/hda"
             }]))
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        dict(id="8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
             name='my_volume',
             status='in-use',
             size=30,
             display_name='My Volume',
             display_description='',
             device="/dev/hdk",
             created_at='2011-05-01 11:54:33',
             volume_type='vol_type_2',
             attachments=[{
                 "id": "2",
                 "server_id": '1',
                 "device": "/dev/hdk"
             }]))
    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)

    vol_type1 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None), {
            'id': 1,
            'name': 'vol_type_1'
        })
    vol_type2 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None), {
            'id': 2,
            'name': 'vol_type_2'
        })
    TEST.volume_types.add(vol_type1, vol_type2)

    # Flavors
    flavor_1 = flavors.Flavor(
        flavors.FlavorManager(None), {
            'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            'name': 'm1.tiny',
            'vcpus': 1,
            'disk': 0,
            'ram': 512,
            'swap': 0,
            'extra_specs': {},
            'OS-FLV-EXT-DATA:ephemeral': 0
        })
    flavor_2 = flavors.Flavor(
        flavors.FlavorManager(None), {
            'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
            'name': 'm1.massive',
            'vcpus': 1000,
            'disk': 1024,
            'ram': 10000,
            'swap': 0,
            'extra_specs': {
                'Trusted': True,
                'foo': 'bar'
            },
            'OS-FLV-EXT-DATA:ephemeral': 2048
        })
    TEST.flavors.add(flavor_1, flavor_2)

    # Keypairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):
        def get_id(is_uuid):
            # Integer ids come from a counter stored as a function attribute;
            # uuid mode returns a fresh random UUID instead.
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(
            sg_manager, {
                "rules": [],
                "tenant_id": TEST.tenant.id,
                "id": get_id(is_uuid),
                "name": u"default",
                "description": u"default"
            })
        sec_group_2 = sec_groups.SecurityGroup(
            sg_manager, {
                "rules": [],
                "tenant_id": TEST.tenant.id,
                "id": get_id(is_uuid),
                "name": u"other_group",
                "description": u"NotDefault."
            })
        sec_group_3 = sec_groups.SecurityGroup(
            sg_manager, {
                "rules": [],
                "tenant_id": TEST.tenant.id,
                "id": get_id(is_uuid),
                "name": u"another_group",
                "description": u"NotDefault."
            })

        rule = {
            'id': get_id(is_uuid),
            'group': {},
            'ip_protocol': u"tcp",
            'from_port': u"80",
            'to_port': u"80",
            'parent_group_id': sec_group_1.id,
            'ip_range': {
                'cidr': u"0.0.0.0/32"
            }
        }

        icmp_rule = {
            'id': get_id(is_uuid),
            'group': {},
            'ip_protocol': u"icmp",
            'from_port': u"9",
            'to_port': u"5",
            'parent_group_id': sec_group_1.id,
            'ip_range': {
                'cidr': u"0.0.0.0/32"
            }
        }

        group_rule = {
            'id': 3,
            'group': {},
            'ip_protocol': u"tcp",
            'from_port': u"80",
            'to_port': u"80",
            'parent_group_id': sec_group_1.id,
            'source_group_id': sec_group_1.id
        }

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {
            "rules": [rule_obj, rule_obj2, rule_obj3],
            "groups": [sec_group_1, sec_group_2, sec_group_3]
        }

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      volumes='1',
                      gigabytes='1000',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # Quota Usages
    quota_usage_data = {
        'gigabytes': {
            'used': 0,
            'quota': 1000
        },
        'instances': {
            'used': 0,
            'quota': 10
        },
        'ram': {
            'used': 0,
            'quota': 10000
        },
        'cores': {
            'used': 0,
            'quota': 20
        },
        'floating_ips': {
            'used': 0,
            'quota': 10
        },
        'volumes': {
            'used': 0,
            'quota': 10
        }
    }
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {
        "absolute": {
            "maxImageMeta": 128,
            "maxPersonality": 5,
            "maxPersonalitySize": 10240,
            "maxSecurityGroupRules": 20,
            "maxSecurityGroups": 10,
            "maxServerMeta": 128,
            "maxTotalCores": 20,
            "maxTotalFloatingIps": 10,
            "maxTotalInstances": 10,
            "maxTotalKeypairs": 100,
            "maxTotalRAMSize": 10000,
            "totalCoresUsed": 0,
            "totalInstancesUsed": 0,
            "totalKeyPairsUsed": 0,
            "totalRAMUsed": 0,
            "totalSecurityGroupsUsed": 0
        }
    }
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {
        "host": "http://nova.example.com:8774",
        "name": "server_1",
        "status": "ACTIVE",
        "tenant_id": TEST.tenants.first().id,
        "user_id": TEST.user.id,
        "server_id": "1",
        "flavor_id": flavor_1.id,
        "image_id": TEST.images.first().id,
        "key_name": keypair.name
    }
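    # SERVER_DATA is assumed to be a module-level JSON template string with
    # %(...)s placeholders that vals fills in before json.loads().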
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2", "status": "BUILD", "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({
        "name": u'\u4e91\u89c4\u5219',
        "status": "ACTIVE",
        "tenant_id": tenant3.id,
        "server_id": "3"
    })
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3)

    # VNC Console Data
    console = {
        u'console': {
            u'url': u'http://example.com:6080/vnc_auto.html',
            u'type': u'novnc'
        }
    }
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {
        u'console': {
            u'url': u'http://example.com:6080/spice_auto.html',
            u'type': u'spice'
        }
    }
    TEST.servers.spice_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {
        'id': 1,
        'fixed_ip': '10.0.0.4',
        'instance_id': server_1.id,
        'ip': '58.58.58.58',
        'pool': 'pool1'
    }
    fip_2 = {
        'id': 2,
        'fixed_ip': None,
        'instance_id': None,
        'ip': '58.58.58.58',
        'pool': 'pool2'
    }
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {
        'id': str(uuid.uuid4()),
        'fixed_ip': '10.0.0.4',
        'instance_id': server_1.id,
        'ip': '58.58.58.58',
        'pool': 'pool1'
    }
    fip_4 = {
        'id': str(uuid.uuid4()),
        'fixed_ip': None,
        'instance_id': None,
        'ip': '58.58.58.58',
        'pool': 'pool2'
    }
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {
        "tenant_id": TEST.tenant.id,
        "instance_name": server_1.name,
        "flavor_name": flavor_1.name,
        "flavor_vcpus": flavor_1.vcpus,
        "flavor_disk": flavor_1.disk,
        "flavor_ram": flavor_1.ram
    }
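    # USAGE_DATA is assumed to be a %-style JSON template defined at module
    # level, like SERVER_DATA above.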
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {
        "tenant_id": tenant3.id,
        "instance_name": server_3.name,
        "flavor_name": flavor_1.name,
        "flavor_vcpus": flavor_1.vcpus,
        "flavor_disk": flavor_1.disk,
        "flavor_ram": flavor_1.ram
    }
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    volume_snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': '40f3fabf-3613-4f5e-90e5-6c9a08333fc3',
            'display_name': 'test snapshot',
            'display_description': 'vol snap!',
            'size': 40,
            'status': 'available',
            'volume_id': '41023e92-8008-4c8b-8059-7f2293ff3775'
        })
    TEST.volume_snapshots.add(volume_snapshot)

    cert_data = {'private_key': 'private', 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(
        availability_zones.AvailabilityZone(
            availability_zones.AvailabilityZoneManager(None), {
                'zoneName': 'nova',
                'zoneState': {
                    'available': True
                },
                'hosts': {
                    "host001": {
                        "nova-network": {
                            "active": True,
                            "available": True
                        }
                    }
                }
            }))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None), {
            "service": {
                "host": "devstack001",
                "id": 3
            },
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": ('{"vendor": "Intel", "model": "core2duo",'
                         '"arch": "x86_64", '
                         '"features": ["lahf_lm", "rdtscp"], '
                         '"topology": {"cores": 1, "threads": 1, '
                         '"sockets": 1}}'),
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1
        })
    TEST.hypervisors.add(hypervisor_1)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296
        }
    }

    # Services
    service_1 = services.Service(
        services.ServiceManager(None), {
            "status": "enabled",
            "binary": "nova-conductor",
            "zone": "internal",
            "state": "up",
            "updated_at": "2013-07-08T05:21:00.000000",
            "host": "devstack001",
            "disabled_reason": None
        })

    service_2 = services.Service(
        services.ServiceManager(None), {
            "status": "enabled",
            "binary": "nova-compute",
            "zone": "nova",
            "state": "up",
            "updated_at": "2013-07-08T05:20:51.000000",
            "host": "devstack001",
            "disabled_reason": None
        })
    TEST.services.add(service_1)
    TEST.services.add(service_2)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(
        aggregates.AggregateManager(None), {
            "name": "foo",
            "availability_zone": None,
            "deleted": 0,
            "created_at": "2013-07-04T13:34:38.000000",
            "updated_at": None,
            "hosts": ["foo", "bar"],
            "deleted_at": None,
            "id": 1,
            "metadata": {
                "foo": "testing",
                "bar": "testing"
            }
        })

    aggregate_2 = aggregates.Aggregate(
        aggregates.AggregateManager(None), {
            "name": "bar",
            "availability_zone": "testing",
            "deleted": 0,
            "created_at": "2013-07-04T13:34:38.000000",
            "updated_at": None,
            "hosts": ["foo", "bar"],
            "deleted_at": None,
            "id": 2,
            "metadata": {
                "foo": "testing",
                "bar": "testing"
            }
        })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)
Example #4
def data(TEST):
    # Data returned by openstack_dashboard.api.neutron wrapper.
    TEST.agents = utils.TestDataContainer()
    TEST.networks = utils.TestDataContainer()
    TEST.subnets = utils.TestDataContainer()
    TEST.subnetpools = utils.TestDataContainer()
    TEST.ports = utils.TestDataContainer()
    TEST.trunks = utils.TestDataContainer()
    TEST.routers = utils.TestDataContainer()
    TEST.routers_with_rules = utils.TestDataContainer()
    TEST.routers_with_routes = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.providers = utils.TestDataContainer()
    TEST.pools = utils.TestDataContainer()
    TEST.vips = utils.TestDataContainer()
    TEST.members = utils.TestDataContainer()
    TEST.monitors = utils.TestDataContainer()
    TEST.neutron_quotas = utils.TestDataContainer()
    TEST.neutron_quota_usages = utils.TestDataContainer()
    TEST.ip_availability = utils.TestDataContainer()
    TEST.qos_policies = utils.TestDataContainer()
    TEST.tp_ports = utils.TestDataContainer()
    TEST.neutron_availability_zones = utils.TestDataContainer()

    # Data returned by neutronclient.
    TEST.api_agents = utils.TestDataContainer()
    TEST.api_networks = utils.TestDataContainer()
    TEST.api_subnets = utils.TestDataContainer()
    TEST.api_subnetpools = utils.TestDataContainer()
    TEST.api_ports = utils.TestDataContainer()
    TEST.api_trunks = utils.TestDataContainer()
    TEST.api_routers = utils.TestDataContainer()
    TEST.api_routers_with_routes = utils.TestDataContainer()
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_security_groups = utils.TestDataContainer()
    TEST.api_security_group_rules = utils.TestDataContainer()
    TEST.api_pools = utils.TestDataContainer()
    TEST.api_vips = utils.TestDataContainer()
    TEST.api_members = utils.TestDataContainer()
    TEST.api_monitors = utils.TestDataContainer()
    TEST.api_extensions = utils.TestDataContainer()
    TEST.api_ip_availability = utils.TestDataContainer()
    TEST.api_qos_policies = utils.TestDataContainer()
    TEST.api_tp_trunks = utils.TestDataContainer()
    TEST.api_tp_ports = utils.TestDataContainer()

    # 1st network.
    network_dict = {'admin_state_up': True,
                    'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
                    'name': 'net1',
                    'status': 'ACTIVE',
                    'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9',
                                '41e53a49-442b-4307-9e9a-88967a6b6657'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': '10.0.0.254',
                                         'start': '10.0.0.2'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': '10.0.0.0/24',
                   'enable_dhcp': True,
                   'gateway_ip': '10.0.0.1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 4,
                   'name': 'mysubnet1',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}
    subnetv6_dict = {
        'allocation_pools': [{'start': 'fdb6:b88a:488e::2',
                              'end': 'fdb6:b88a:488e:0:ffff:ffff:ffff:ffff'}],
        'dns_nameservers': [],
        'host_routes': [],
        'cidr': 'fdb6:b88a:488e::/64',
        'enable_dhcp': True,
        'gateway_ip': 'fdb6:b88a:488e::1',
        'id': network_dict['subnets'][1],
        'ip_version': 6,
        'name': 'myv6subnet',
        'network_id': network_dict['id'],
        'tenant_id': network_dict['tenant_id'],
        'ipv6_ra_mode': 'slaac',
        'ipv6_address_mode': 'slaac'
    }

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)
    TEST.api_subnets.add(subnetv6_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    subnetv6 = neutron.Subnet(subnetv6_dict)
    network['subnets'] = [subnet, subnetv6]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)
    TEST.subnets.add(subnetv6)
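    # Pattern used throughout this fixture: raw dicts go into the api_*
    # containers (what neutronclient would return), while the same data
    # wrapped in neutron.Network/neutron.Subnet objects goes into the
    # dashboard-level containers.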

    # Ports on 1st network.
    port_dict = {
        'admin_state_up': True,
        'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
        'device_owner': 'network:dhcp',
        'fixed_ips': [{'ip_address': '10.0.0.3',
                       'subnet_id': subnet_dict['id']}],
        'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
        'mac_address': 'fa:16:3e:9c:d5:7e',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'allowed_address_pairs': [
            {'ip_address': '174.0.0.201',
             'mac_address': 'fa:16:3e:7a:7b:18'}
        ],
        'port_security_enabled': True,
        'security_groups': [],
    }

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    port_dict = {
        'admin_state_up': True,
        'device_id': '1',
        'device_owner': 'compute:nova',
        'fixed_ips': [{'ip_address': '10.0.0.4',
                       'subnet_id': subnet_dict['id']},
                      {'ip_address': 'fdb6:b88a:488e:0:f816:3eff:fe9d:e62f',
                       'subnet_id': subnetv6_dict['id']}],
        'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
        'mac_address': 'fa:16:3e:9d:e6:2f',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'port_security_enabled': True,
        'security_groups': [
            # sec_group_1 ID below
            'faad7c80-3b62-4440-967c-13808c37131d',
            # sec_group_2 ID below
            '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d'
        ],
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))
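    # Keep a reference to this compute port; the associated floating IP
    # fixture further below points at it.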
    assoc_port = port_dict

    port_dict = {
        'admin_state_up': True,
        'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
        'device_owner': 'network:router_interface',
        'fixed_ips': [{'ip_address': '10.0.0.1',
                       'subnet_id': subnet_dict['id']}],
        'id': '9036eedb-e7fa-458e-bc6e-d9d06d9d1bc4',
        'mac_address': 'fa:16:3e:9c:d5:7f',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [],
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))
    port_dict = {
        'admin_state_up': True,
        'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
        'device_owner': 'network:router_interface',
        'fixed_ips': [{'ip_address': 'fdb6:b88a:488e::1',
                       'subnet_id': subnetv6_dict['id']}],
        'id': '8047e0d5-5ef5-4b6e-a1a7-d3a52ad980f7',
        'mac_address': 'fa:16:3e:69:6e:e9',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [],
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    # 2nd network.
    network_dict = {'admin_state_up': True,
                    'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
                    'name': 'net2',
                    'status': 'ACTIVE',
                    'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
                    'tenant_id': '2',
                    'router:external': False,
                    'shared': True}
    subnet_dict = {'allocation_pools': [{'end': '172.16.88.254',
                                         'start': '172.16.88.2'}],
                   'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
                   'host_routes': [{'destination': '192.168.20.0/24',
                                    'nexthop': '172.16.88.253'},
                                   {'destination': '192.168.21.0/24',
                                    'nexthop': '172.16.88.252'}],
                   'cidr': '172.16.88.0/24',
                   'enable_dhcp': True,
                   'gateway_ip': '172.16.88.1',
                   'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
                   'ip_version': 4,
                   'name': 'aaaa',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    port_dict = {
        'admin_state_up': True,
        'device_id': '2',
        'device_owner': 'compute:nova',
        'fixed_ips': [{'ip_address': '172.16.88.3',
                       'subnet_id': subnet_dict['id']}],
        'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
        'mac_address': 'fa:16:3e:56:e6:2f',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [
            # sec_group_1 ID below
            'faad7c80-3b62-4440-967c-13808c37131d',
        ],
    }

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    # External network.
    network_dict = {'admin_state_up': True,
                    'id': '9b466b94-213a-4cda-badf-72c102a874da',
                    'name': 'ext_net',
                    'status': 'ACTIVE',
                    'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
                    'tenant_id': '3',
                    'router:external': True,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'start': '172.24.4.226',
                                         'end': '172.24.4.238'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': '172.24.4.0/28',
                   'enable_dhcp': False,
                   'gateway_ip': '172.24.4.225',
                   'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
                   'ip_version': 4,
                   'name': 'ext_subnet',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id']}
    ext_net = network_dict

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # 1st v6 network.
    network_dict = {'admin_state_up': True,
                    'id': '96688ea1-ffa5-78ec-22ca-33aaabfaf775',
                    'name': 'v6_net1',
                    'status': 'ACTIVE',
                    'subnets': ['88ddd443-4377-ab1f-87dd-4bc4a662dbb6'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
                                         'start': 'ff09::02'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': 'ff09::/64',
                   'enable_dhcp': True,
                   'gateway_ip': 'ff09::1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 6,
                   'name': 'v6_subnet1',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id'],
                   'ipv6_modes': 'none/none'}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # 2nd v6 network - slaac.
    network_dict = {'admin_state_up': True,
                    'id': 'c62e4bb3-296a-4cd1-8f6b-aaa7a0092326',
                    'name': 'v6_net2',
                    'status': 'ACTIVE',
                    'subnets': ['5d736a21-0036-4779-8f8b-eed5f98077ec'],
                    'tenant_id': '1',
                    'router:external': False,
                    'shared': False}
    subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
                                         'start': 'ff09::02'}],
                   'dns_nameservers': [],
                   'host_routes': [],
                   'cidr': 'ff09::/64',
                   'enable_dhcp': True,
                   'gateway_ip': 'ff09::1',
                   'id': network_dict['subnets'][0],
                   'ip_version': 6,
                   'name': 'v6_subnet2',
                   'network_id': network_dict['id'],
                   'tenant_id': network_dict['tenant_id'],
                   'ipv6_modes': 'slaac/slaac'}

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # Set up router data.
    port_dict = {
        'admin_state_up': True,
        'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
        'device_owner': 'network:router_gateway',
        'fixed_ips': [{'ip_address': '10.0.0.3',
                       'subnet_id': subnet_dict['id']}],
        'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
        'mac_address': 'fa:16:3e:9c:d5:7e',
        'name': '',
        'network_id': TEST.networks.get(name="ext_net")['id'],
        'status': 'ACTIVE',
        'tenant_id': '1',
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [],
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    trunk_dict = {'status': 'UP',
                  'sub_ports': [],
                  'name': 'trunk1',
                  'description': 'blah',
                  'admin_state_up': True,
                  'tenant_id': '1',
                  'project_id': '1',
                  'port_id': '895d375c-1447-11e7-a52f-f7f280bbc809',
                  'id': '94fcb9e8-1447-11e7-bed6-8b8c4ac74491'}

    TEST.api_trunks.add(trunk_dict)
    TEST.trunks.add(neutron.Trunk(trunk_dict))

    router_dict = {'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
                   'name': 'router1',
                   'status': 'ACTIVE',
                   'admin_state_up': True,
                   'distributed': True,
                   'external_gateway_info':
                       {'network_id': ext_net['id']},
                   'tenant_id': '1',
                   'availability_zone_hints': ['nova']}
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))
    router_dict = {'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
                   'name': 'router2',
                   'status': 'ACTIVE',
                   'admin_state_up': False,
                   'distributed': False,
                   'external_gateway_info': None,
                   'tenant_id': '1'}
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))
    router_dict = {'id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
                   'name': 'rulerouter',
                   'status': 'ACTIVE',
                   'admin_state_up': True,
                   'distributed': False,
                   'external_gateway_info':
                       {'network_id': ext_net['id']},
                   'tenant_id': '1',
                   'router_rules': [{'id': '101',
                                     'action': 'deny',
                                     'source': 'any',
                                     'destination': 'any',
                                     'nexthops': []},
                                    {'id': '102',
                                     'action': 'permit',
                                     'source': 'any',
                                     'destination': '8.8.8.8/32',
                                     'nexthops': ['1.0.0.2', '1.0.0.1']}]}
    TEST.api_routers.add(router_dict)
    TEST.routers_with_rules.add(neutron.Router(router_dict))
    router_dict_with_route = {'id': '725c24c9-061b-416b-b9d4-012392b32fd9',
                              'name': 'routerouter',
                              'status': 'ACTIVE',
                              'admin_state_up': True,
                              'distributed': False,
                              'external_gateway_info':
                                  {'network_id': ext_net['id']},
                              'tenant_id': '1',
                              'routes': [{'nexthop': '10.0.0.1',
                                          'destination': '172.0.0.0/24'},
                                         {'nexthop': '10.0.0.2',
                                          'destination': '172.1.0.0/24'}]}
    TEST.api_routers_with_routes.add(router_dict_with_route)
    TEST.routers_with_routes.add(neutron.Router(router_dict_with_route))

    # Floating IP.
    # Unassociated.
    fip_dict = {'tenant_id': '1',
                'floating_ip_address': '172.16.88.227',
                'floating_network_id': ext_net['id'],
                'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
                'fixed_ip_address': None,
                'port_id': None,
                'router_id': None}
    TEST.api_floating_ips.add(fip_dict)
    fip_with_instance = copy.deepcopy(fip_dict)
    fip_with_instance.update({'instance_id': None,
                              'instance_type': None})
    TEST.floating_ips.add(neutron.FloatingIp(fip_with_instance))

    # Associated (with compute port on 1st network).
    fip_dict = {'tenant_id': '1',
                'floating_ip_address': '172.16.88.228',
                'floating_network_id': ext_net['id'],
                'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
                'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
                'port_id': assoc_port['id'],
                'router_id': router_dict['id']}
    TEST.api_floating_ips.add(fip_dict)
    fip_with_instance = copy.deepcopy(fip_dict)
    fip_with_instance.update({'instance_id': '1',
                              'instance_type': 'compute'})
    TEST.floating_ips.add(neutron.FloatingIp(fip_with_instance))

    # Security group.

    sec_group_1 = {'tenant_id': '1',
                   'description': 'default',
                   'id': 'faad7c80-3b62-4440-967c-13808c37131d',
                   'name': 'default'}
    sec_group_2 = {'tenant_id': '1',
                   'description': 'NotDefault',
                   'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
                   'name': 'other_group'}
    sec_group_3 = {'tenant_id': '1',
                   'description': 'NotDefault',
                   'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
                   'name': 'another_group'}
    sec_group_empty = {'tenant_id': '1',
                       'description': 'SG without rules',
                       'id': 'f205f3bc-d402-4e40-b004-c62401e19b4b',
                       'name': 'empty_group'}

    def add_rule_to_group(secgroup, default_only=True):
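        """Attach security group rules to secgroup.

        By default only the two default egress rules (IPv4 and IPv6) are
        added; with default_only=False, sample ingress rules and an extra
        egress rule are added as well.
        """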
        rule_egress_ipv4 = {
            'id': uuidutils.generate_uuid(),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_egress_ipv6 = {
            'id': uuidutils.generate_uuid(),
            'direction': u'egress', 'ethertype': u'IPv6',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}

        rule_tcp_80 = {
            'id': uuidutils.generate_uuid(),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_icmp = {
            'id': uuidutils.generate_uuid(),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 5, 'port_range_max': 8,
            'protocol': u'icmp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_group = {
            'id': uuidutils.generate_uuid(),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': sec_group_1['id'],
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rule_all_tcp = {
            'id': uuidutils.generate_uuid(),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': 1, 'port_range_max': 65535,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/24',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}

        rules = []
        if not default_only:
            rules += [rule_tcp_80, rule_icmp, rule_group, rule_all_tcp]
        rules += [rule_egress_ipv4, rule_egress_ipv6]
        secgroup['security_group_rules'] = rules

    add_rule_to_group(sec_group_1, default_only=False)
    add_rule_to_group(sec_group_2)
    add_rule_to_group(sec_group_3)
    # NOTE: sec_group_empty is a SG without rules,
    # so we don't call add_rule_to_group.

    groups = [sec_group_1, sec_group_2, sec_group_3, sec_group_empty]
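    # Map each security group id to its name; the mapping is passed to the
    # Dashboard-level SecurityGroup/SecurityGroupRule wrappers below.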
    sg_name_dict = {sg['id']: sg['name'] for sg in groups}
    for sg in groups:
        # Neutron API.
        TEST.api_security_groups.add(sg)
        for rule in sg.get('security_group_rules', []):
            TEST.api_security_group_rules.add(copy.copy(rule))
        # OpenStack Dashboard internal API.
        TEST.security_groups.add(
            neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
        for rule in sg.get('security_group_rules', []):
            TEST.security_group_rules.add(
                neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))

    # Subnetpools

    # 1st subnetpool
    subnetpool_dict = {'default_prefixlen': 24,
                       'default_quota': None,
                       'id': '419eb314-e244-4088-aed7-851af9d9500d',
                       'ip_version': 4,
                       'max_prefixlen': 32,
                       'min_prefixlen': 12,
                       'name': 'mysubnetpool1',
                       'prefixes': ['172.16.0.0/12'],
                       'shared': False,
                       'tenant_id': '1'}

    TEST.api_subnetpools.add(subnetpool_dict)
    subnetpool = neutron.SubnetPool(subnetpool_dict)
    TEST.subnetpools.add(subnetpool)

    # 2nd subnetpool (v6)
    subnetpool_dict = {'default_prefixlen': 64,
                       'default_quota': None,
                       'id': 'dcdad289-46f3-4298-bec6-41d91c942efa',
                       'ip_version': 6,
                       'max_prefixlen': 64,
                       'min_prefixlen': 60,
                       'name': 'mysubnetpool2',
                       'prefixes': ['2001:db8:42::/48'],
                       'shared': False,
                       'tenant_id': '1'}

    TEST.api_subnetpools.add(subnetpool_dict)
    subnetpool = neutron.SubnetPool(subnetpool_dict)
    TEST.subnetpools.add(subnetpool)

    # Quotas.
    quota_data = {'network': '10',
                  'subnet': '10',
                  'port': '50',
                  'router': '10',
                  'floatingip': '50',
                  'security_group': '20',
                  'security_group_rule': '100',
                  }
    TEST.neutron_quotas.add(base.QuotaSet(quota_data))

    # Quota Usages
    quota_usage_data = {
        'network': {'used': 0, 'quota': 5},
        'subnet': {'used': 0, 'quota': 5},
        'port': {'used': 0, 'quota': 5},
        'router': {'used': 0, 'quota': 5},
        'floatingip': {'used': 0, 'quota': 10},
    }
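    # Register each quota limit with add_quota(), then record the current
    # usage for it with tally().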
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.neutron_quota_usages.add(quota_usage)

    # Extensions.
    extension_1 = {"name": "security-group",
                   "alias": "security-group",
                   "description": "The security groups extension."}
    extension_2 = {"name": "Quota management support",
                   "alias": "quotas",
                   "description": "Expose functions for quotas management"}
    extension_3 = {"name": "Provider network",
                   "alias": "provider",
                   "description": "Provider network extension"}
    extension_4 = {"name": "Distributed Virtual Router",
                   "alias": "dvr",
                   "description":
                   "Enables configuration of Distributed Virtual Routers."}
    extension_5 = {"name": "HA Router extension",
                   "alias": "l3-ha",
                   "description": "Add HA capability to routers."}
    extension_6 = {"name": "Trunks",
                   "alias": "trunk",
                   "description": "Provides support for trunk ports."}
    TEST.api_extensions.add(extension_1)
    TEST.api_extensions.add(extension_2)
    TEST.api_extensions.add(extension_3)
    TEST.api_extensions.add(extension_4)
    TEST.api_extensions.add(extension_5)
    TEST.api_extensions.add(extension_6)

    # 1st agent.
    agent_dict = {"binary": "neutron-openvswitch-agent",
                  "description": None,
                  "admin_state_up": True,
                  "heartbeat_timestamp": "2013-07-26 06:51:47",
                  "alive": True,
                  "id": "c876ff05-f440-443e-808c-1d34cda3e88a",
                  "topic": "N/A",
                  "host": "devstack001",
                  "agent_type": "Open vSwitch agent",
                  "started_at": "2013-07-26 05:23:28",
                  "created_at": "2013-07-26 05:23:28",
                  "configurations": {"devices": 2}}
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    # 2nd agent.
    agent_dict = {"binary": "neutron-dhcp-agent",
                  "description": None,
                  "admin_state_up": True,
                  "heartbeat_timestamp": "2013-07-26 06:51:48",
                  "alive": True,
                  "id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
                  "topic": "dhcp_agent",
                  "host": "devstack001",
                  "agent_type": "DHCP agent",
                  "started_at": "2013-07-26 05:23:30",
                  "created_at": "2013-07-26 05:23:30",
                  "configurations": {
                      "subnets": 1,
                      "use_namespaces": True,
                      "dhcp_lease_duration": 120,
                      "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
                      "networks": 1,
                      "ports": 1}}
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    # Service providers.
    provider_1 = {"service_type": "LOADBALANCER",
                  "name": "haproxy",
                  "default": True}
    TEST.providers.add(provider_1)

    # ports on 4th network
    port_dict = {
        'admin_state_up': True,
        'device_id': '9872faaa-b2b2-eeee-9911-21332eedaa77',
        'device_owner': 'network:dhcp',
        'fixed_ips': [{'ip_address': '11.10.0.3',
                       'subnet_id':
                       TEST.subnets.first().id}],
        'id': 'a21dcd22-6733-cccc-aa32-22adafaf16a2',
        'mac_address': '78:22:ff:1a:ba:23',
        'name': 'port5',
        'network_id': TEST.networks.first().id,
        'status': 'ACTIVE',
        'tenant_id': TEST.networks.first().tenant_id,
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [],
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    availability = {'network_ip_availability': {
        'used_ips': 2,
        'subnet_ip_availability': [{
            'used_ips': 1,
            'subnet_id': '2c90f321-9cc7-41b4-a3cf-88110f120a94',
            'subnet_name': 'ipv6-public-subnet',
            'ip_version': 6,
            'cidr': '2001:db8::/64',
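            # 2**64 - 2 addresses in the /64 IPv6 subnet.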
            'total_ips': 18446744073709551614},
            {'used_ips': 1,
             'subnet_id': '4d77d5fb-c26c-4ac5-b2ca-fca2f89b0fc1',
             'subnet_name': 'public-subnet',
             'ip_version': 4,
             'cidr': '172.24.4.0/24',
             'total_ips': 253}],
        'network_id': 'd87d5be5-cfca-486f-8db5-a446330e4513',
        'tenant_id': 'd564b2a4fc0544fb89f8a0434dd96863',
        'network_name': 'public',
        'total_ips': 18446744073709551867}
    }

    TEST.ip_availability.add(availability)
    TEST.api_ip_availability.add(availability)

    # qos policies
    policy_dict = {'id': 'a21dcd22-7189-cccc-aa32-22adafaf16a7',
                   'name': 'policy 1',
                   'tenant_id': '1'}
    TEST.api_qos_policies.add(policy_dict)
    TEST.qos_policies.add(neutron.QoSPolicy(policy_dict))
    policy_dict1 = {'id': 'a21dcd22-7189-ssss-aa32-22adafaf16a7',
                    'name': 'policy 2',
                    'tenant_id': '1'}
    TEST.api_qos_policies.add(policy_dict1)
    TEST.qos_policies.add(neutron.QoSPolicy(policy_dict1))

    # TRUNKPORT
    #
    #  The test setup was created by the following command sequence:
    #    openstack network create tst
    #    openstack subnet create tstsub --network tst\
    #    --subnet-range 10.10.16.128/26
    #    openstack network create tstalt
    #    openstack subnet create tstaltsub --network tstalt\
    #    --subnet-range 10.10.17.128/26
    #    openstack port create --network tst plain
    #    openstack port create --network tst parent
    #    openstack port create --network tst child1
    #    openstack port create --network tstalt child2
    #    openstack network trunk create --parent-port parent trunk
    #    openstack network trunk set\
    #    --subport port=child1,segmentation-type=vlan,segmentation-id=100 trunk
    #    openstack network trunk set\
    #    --subport port=child2,segmentation-type=vlan,segmentation-id=200 trunk
    #   ids/uuids are captured from a live setup.

    # This collection holds the test setup.
    tdata = {'tenant_id': '19c9123a944644cb9e923497a018d0b7',
             'trunk_id': '920625a3-13de-46b4-b6c9-8b35f29b3cfe',
             'security_group': '3fd8c007-9093-4aa3-b475-a0c178d4e1e4',
             'tag_1': 100,
             'tag_2': 200,
             'net': {'tst_id': '5a340332-cc92-42aa-8980-15f47c0d0f3d',
                     'tstalt_id': '0fb41ffd-3933-4da4-8a83-025d328aedf3'},
             'subnet': {'tst_id': '0b883baf-5a21-4605-ab56-229a24ec585b',
                        'tstalt_id': '0e184cf2-97dc-4738-b4b3-1871faf5d685'},
             'child1': {'id': '9c151ffb-d7a6-4f15-8eae-d0950999fdfe',
                        'ip': '10.10.16.140',
                        'mac': 'fa:16:3e:22:63:6f',
                        'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61'},
             'child2': {'id': 'cedb145f-c163-4630-98a3-e1990744bdef',
                        'ip': '10.10.17.137',
                        'mac': 'fa:16:3e:0d:ca:eb',
                        'device_id': '9872faaa-b2b2-eeee-9911-21332eedaa77'},
             'parent': {'id': '5b27429d-048b-40fa-88f9-8e2c4ff7d28b',
                        'ip': '10.10.16.141',
                        'mac': 'fa:16:3e:ab:a8:22',
                        'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890'},
             'plain': {'id': 'bc04da56-d7fc-461e-b95d-a2c66e77ad9a',
                       'ip': '10.10.16.135',
                       'mac': 'fa:16:3e:9c:d5:7f',
                       'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53'}}

    #  network tst

    #    trunk
    tp_trunk_dict = {
        'status': 'UP',
        'sub_ports': [{'segmentation_type': 'vlan',
                       'segmentation_id': tdata['tag_1'],
                       'port_id': tdata['child1']['id']},
                      {'segmentation_type': u'vlan',
                       'segmentation_id': tdata['tag_2'],
                       'port_id': tdata['child2']['id']}],
        'name': 'trunk',
        'admin_state_up': True,
        'tenant_id': tdata['tenant_id'],
        'project_id': tdata['tenant_id'],
        'port_id': tdata['parent']['id'],
        'id': tdata['trunk_id']
    }
    TEST.api_tp_trunks.add(tp_trunk_dict)

    #    port parent
    parent_port_dict = {
        'admin_state_up': True,
        'device_id': tdata['parent']['device_id'],
        'device_owner': 'compute:nova',
        'fixed_ips': [{'ip_address': tdata['parent']['ip'],
                       'subnet_id': tdata['subnet']['tst_id']}],
        'id': tdata['parent']['id'],
        'mac_address': tdata['parent']['mac'],
        'name': 'parent',
        'network_id': tdata['net']['tst_id'],
        'status': 'ACTIVE',
        'tenant_id': tdata['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [tdata['security_group']],
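        # Neutron adds 'trunk_details' to a port that acts as a trunk parent.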
        'trunk_details': {
            'sub_ports': [{'segmentation_type': 'vlan',
                           'mac_address': tdata['child1']['mac'],
                           'segmentation_id': tdata['tag_1'],
                           'port_id': tdata['child1']['id']},
                          {'segmentation_type': 'vlan',
                           'mac_address': tdata['child2']['mac'],
                           'segmentation_id': tdata['tag_2'],
                           'port_id': tdata['child2']['id']}],
            'trunk_id': tdata['trunk_id']}
    }
    TEST.api_tp_ports.add(parent_port_dict)
    TEST.tp_ports.add(neutron.PortTrunkParent(parent_port_dict))

    #    port child1
    child1_port_dict = {
        'admin_state_up': True,
        'device_id': tdata['child1']['device_id'],
        'device_owner': 'compute:nova',
        'fixed_ips': [{'ip_address': tdata['child1']['ip'],
                       'subnet_id': tdata['subnet']['tst_id']}],
        'id': tdata['child1']['id'],
        'mac_address': tdata['child1']['mac'],
        'name': 'child1',
        'network_id': tdata['net']['tst_id'],
        'status': 'ACTIVE',
        'tenant_id': tdata['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [tdata['security_group']]
    }
    TEST.api_tp_ports.add(child1_port_dict)
    TEST.tp_ports.add(neutron.PortTrunkSubport(
        child1_port_dict,
        {'trunk_id': tdata['trunk_id'],
         'segmentation_type': 'vlan',
         'segmentation_id': tdata['tag_1']}))

    #    port plain
    port_dict = {
        'admin_state_up': True,
        'device_id': tdata['plain']['device_id'],
        'device_owner': 'compute:nova',
        'fixed_ips': [{'ip_address': tdata['plain']['ip'],
                       'subnet_id': tdata['subnet']['tst_id']}],
        'id': tdata['plain']['id'],
        'mac_address': tdata['plain']['mac'],
        'name': 'plain',
        'network_id': tdata['net']['tst_id'],
        'status': 'ACTIVE',
        'tenant_id': tdata['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [tdata['security_group']]
    }
    TEST.api_tp_ports.add(port_dict)
    TEST.tp_ports.add(neutron.Port(port_dict))

    #  network tstalt

    #    port child2
    child2_port_dict = {
        'admin_state_up': True,
        'device_id': tdata['child2']['device_id'],
        'device_owner': 'compute:nova',
        'fixed_ips': [{'ip_address': tdata['child2']['ip'],
                       'subnet_id': tdata['subnet']['tstalt_id']}],
        'id': tdata['child2']['id'],
        'mac_address': tdata['child2']['mac'],
        'name': 'child2',
        'network_id': tdata['net']['tstalt_id'],
        'status': 'ACTIVE',
        'tenant_id': tdata['tenant_id'],
        'binding:vnic_type': 'normal',
        'binding:host_id': 'host',
        'security_groups': [tdata['security_group']]
    }
    TEST.api_tp_ports.add(child2_port_dict)
    TEST.tp_ports.add(neutron.PortTrunkSubport(
        child2_port_dict,
        {'trunk_id': tdata['trunk_id'],
         'segmentation_type': 'vlan',
         'segmentation_id': tdata['tag_2']}))

    # Availability Zones
    TEST.neutron_availability_zones.add(
        {
            'state': 'available',
            'resource': 'router',
            'name': 'nova'
        }
    )
Example No. 5
def data(TEST):

    # BareMetalNode
    TEST.baremetalclient_nodes = test_data_utils.TestDataContainer()
    bm_node_1 = baremetal.BareMetalNode(
        baremetal.BareMetalNodeManager(None), {
            'id': '1',
            'uuid': 'd0ace338-a702-426a-b344-394ce861e070',
            'ipmi_address': '1.1.1.1',
            'ipmi_username': '******',
            'ipmi_password': '******',
            'ip_address': '192.0.2.36',
            'instance_uuid': 'aa',
            "service_host": "undercloud",
            "cpus": 1,
            "memory_mb": 4096,
            "local_gb": 20,
            'task_state': 'active',
            "pm_address": '1.1.1.1',
            "pm_user": '******',
            "interfaces": [{
                "address": "52:54:00:90:38:01"
            }, {
                "address": "52:54:00:90:38:02"
            }],
        })
    bm_node_2 = baremetal.BareMetalNode(
        baremetal.BareMetalNodeManager(None), {
            'id': '2',
            'uuid': 'bd70e5e7-52e6-40d6-b862-c7f7ea1f262e',
            'instance_uuid': 'bb',
            "service_host": "undercloud",
            "cpus": 1,
            "memory_mb": 4096,
            "local_gb": 20,
            'task_state': 'active',
            "pm_address": None,
            "pm_user": None,
            "interfaces": [{
                "address": "52:54:00:90:38:01"
            }],
        })
    bm_node_3 = baremetal.BareMetalNode(
        baremetal.BareMetalNodeManager(None), {
            'id': '3',
            'uuid': '74981-2cfa-4e15-be96-3f0ec5635115',
            'instance_uuid': 'cc',
            "service_host": "undercloud",
            "cpus": 1,
            "memory_mb": 4096,
            "local_gb": 20,
            'task_state': 'reboot',
            "pm_address": None,
            "pm_user": None,
            "interfaces": [{
                "address": "52:54:00:90:38:01"
            }],
        })
    bm_node_4 = baremetal.BareMetalNode(
        baremetal.BareMetalNodeManager(None), {
            'id': '4',
            'uuid': 'f5c1df48-dcbe-4eb5-bd44-9eef2cb9139a',
            'instance_uuid': 'cc',
            "service_host": "undercloud",
            "cpus": 1,
            "memory_mb": 4096,
            "local_gb": 20,
            'task_state': 'active',
            "pm_address": None,
            "pm_user": None,
            "interfaces": [{
                "address": "52:54:00:90:38:01"
            }],
        })
    bm_node_5 = baremetal.BareMetalNode(
        baremetal.BareMetalNodeManager(None), {
            'id': '5',
            'uuid': 'c8998d40-2ff6-4233-8535-b44a825b20c3',
            'instance_uuid': 'dd',
            "service_host": "undercloud",
            "cpus": 1,
            "memory_mb": 4096,
            "local_gb": 20,
            'task_state': 'error',
            "pm_address": None,
            "pm_user": None,
            "interfaces": [{
                "address": "52:54:00:90:38:01"
            }],
        })
    bm_node_6 = baremetal.BareMetalNode(
        baremetal.BareMetalNodeManager(None), {
            'id': '6',
            'uuid': 'cfd5a2cf-f21c-4044-a604-acb855478e44',
            'instance_uuid': None,
            "service_host": "undercloud",
            "cpus": 1,
            "memory_mb": 4096,
            "local_gb": 20,
            'task_state': None,
            "pm_address": None,
            "pm_user": None,
            "interfaces": [{
                "address": "52:54:00:90:38:01"
            }],
        })
    TEST.baremetalclient_nodes.add(bm_node_1, bm_node_2, bm_node_3, bm_node_4,
                                   bm_node_5, bm_node_6)

    # IronicNode
    TEST.ironicclient_nodes = test_data_utils.TestDataContainer()
    node_1 = node.Node(
        node.NodeManager(None), {
            'id': '1',
            'uuid': 'aa-11',
            'instance_uuid': 'aa',
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '1.1.1.1',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.2'
            },
            'properties': {
                'cpus': '8',
                'memory_mb': '4096',
                'local_gb': '10',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'on',
            'target_power_state': 'on',
            'maintenance': None,
            'newly_discovered': None,
            'provision_state': 'active',
            'extra': {}
        })
    node_2 = node.Node(
        node.NodeManager(None), {
            'id': '2',
            'uuid': 'bb-22',
            'instance_uuid': 'bb',
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '2.2.2.2',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.3'
            },
            'properties': {
                'cpus': '16',
                'memory_mb': '4096',
                'local_gb': '100',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'on',
            'target_power_state': 'on',
            'maintenance': None,
            'newly_discovered': None,
            'provision_state': 'active',
            'extra': {}
        })
    node_3 = node.Node(
        node.NodeManager(None), {
            'id': '3',
            'uuid': 'cc-33',
            'instance_uuid': 'cc',
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '3.3.3.3',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.4'
            },
            'properties': {
                'cpus': '32',
                'memory_mb': '8192',
                'local_gb': '1',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'rebooting',
            'target_power_state': 'on',
            'maintenance': None,
            'newly_discovered': None,
            'provision_state': 'deploying',
            'extra': {}
        })
    node_4 = node.Node(
        node.NodeManager(None), {
            'id': '4',
            'uuid': 'cc-44',
            'instance_uuid': 'cc',
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '4.4.4.4',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.5'
            },
            'properties': {
                'cpus': '8',
                'memory_mb': '4096',
                'local_gb': '10',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'on',
            'target_power_state': 'on',
            'maintenance': None,
            'newly_discovered': None,
            'provision_state': 'deploying',
            'extra': {}
        })
    node_5 = node.Node(
        node.NodeManager(None), {
            'id': '5',
            'uuid': 'dd-55',
            'instance_uuid': 'dd',
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '5.5.5.5',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.6'
            },
            'properties': {
                'cpus': '8',
                'memory_mb': '4096',
                'local_gb': '10',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'error',
            'target_power_state': 'on',
            'provision_state': 'error',
            'maintenance': None,
            'newly_discovered': None,
            'extra': {}
        })
    node_6 = node.Node(
        node.NodeManager(None), {
            'id': '6',
            'uuid': 'ff-66',
            'instance_uuid': None,
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '5.5.5.5',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.6'
            },
            'properties': {
                'cpus': '8',
                'memory_mb': '4096',
                'local_gb': '10',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'on',
            'target_power_state': 'on',
            'maintenance': None,
            'newly_discovered': None,
            'provision_state': 'active',
            'extra': {}
        })
    node_7 = node.Node(
        node.NodeManager(None), {
            'id': '7',
            'uuid': 'gg-77',
            'instance_uuid': None,
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '7.7.7.7',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.7'
            },
            'properties': {
                'cpus': '8',
                'memory_mb': '4096',
                'local_gb': '10',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'off',
            'target_power_state': 'on',
            'maintenance': True,
            'newly_discovered': None,
            'provision_state': 'deploying',
            'extra': {}
        })
    node_8 = node.Node(
        node.NodeManager(None), {
            'id': '8',
            'uuid': 'hh-88',
            'instance_uuid': None,
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '8.8.8.8',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.8'
            },
            'properties': {
                'cpus': '8',
                'memory_mb': '4096',
                'local_gb': '10',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'on',
            'target_power_state': 'on',
            'maintenance': True,
            'newly_discovered': True,
            'provision_state': 'active',
            'extra': {}
        })
    node_9 = node.Node(
        node.NodeManager(None), {
            'id': '9',
            'uuid': 'ii-99',
            'instance_uuid': None,
            'driver': 'pxe_ipmitool',
            'driver_info': {
                'ipmi_address': '9.9.9.9',
                'ipmi_username': '******',
                'ipmi_password': '******',
                'ip_address': '1.2.2.9'
            },
            'properties': {
                'cpus': '16',
                'memory_mb': '8192',
                'local_gb': '1000',
                'cpu_arch': 'x86_64',
            },
            'power_state': 'on',
            'target_power_state': 'on',
            'maintenance': True,
            'newly_discovered': True,
            'provision_state': 'active',
            'extra': {}
        })
    TEST.ironicclient_nodes.add(node_1, node_2, node_3, node_4, node_5, node_6,
                                node_7, node_8, node_9)

    # Ports
    TEST.ironicclient_ports = test_data_utils.TestDataContainer()
    port_1 = port.Port(port.PortManager(None), {
        'id': '1-port-id',
        'type': 'port',
        'address': 'aa:aa:aa:aa:aa:aa'
    })
    port_2 = port.Port(port.PortManager(None), {
        'id': '2-port-id',
        'type': 'port',
        'address': 'bb:bb:bb:bb:bb:bb'
    })
    port_3 = port.Port(port.PortManager(None), {
        'id': '3-port-id',
        'type': 'port',
        'address': 'cc:cc:cc:cc:cc:cc'
    })
    port_4 = port.Port(port.PortManager(None), {
        'id': '4-port-id',
        'type': 'port',
        'address': 'dd:dd:dd:dd:dd:dd'
    })
    TEST.ironicclient_ports.add(port_1, port_2, port_3, port_4)
Example No. 6
def data(TEST):
    TEST.containers = utils.TestDataContainer()
    TEST.objects = utils.TestDataContainer()
    TEST.folder = utils.TestDataContainer()
    TEST.folder_alt = utils.TestDataContainer()
    TEST.subfolder = utils.TestDataContainer()

    # '%' can break URL if not properly url-quoted
    # ' ' (space) can break 'Content-Disposition' if not properly
    # double-quoted

    container_dict_1 = {"name": u"container one%\u6346",
                        "container_object_count": 2,
                        "container_bytes_used": 256,
                        "timestamp": timeutils.utcnow().isoformat(),
                        "is_public": False,
                        "public_url": ""}
    container_1 = swift.Container(container_dict_1)
    container_2_name = u"container_two\u6346"
    container_dict_2 = {"name": container_2_name,
                        "container_object_count": 4,
                        "container_bytes_used": 1024,
                        "timestamp": timeutils.utcnow().isoformat(),
                        "is_public": True,
                        "public_url":
                            "http://public.swift.example.com:8080/" +
                            "v1/project_id/%s" % utils_http.urlquote(
                                container_2_name)}
    container_2 = swift.Container(container_dict_2)
    container_dict_3 = {"name": u"container,three%\u6346",
                        "container_object_count": 2,
                        "container_bytes_used": 256,
                        "timestamp": timeutils.utcnow().isoformat(),
                        "is_public": False,
                        "public_url": ""}
    container_3 = swift.Container(container_dict_3)
    TEST.containers.add(container_1, container_2, container_3)

    object_dict = {"name": u"test object%\u6346",
                   "content_type": u"text/plain",
                   "bytes": 128,
                   "timestamp": timeutils.utcnow().isoformat(),
                   "last_modified": None,
                   "hash": u"object_hash"}
    object_dict_2 = {"name": u"test_object_two\u6346",
                     "content_type": u"text/plain",
                     "bytes": 128,
                     "timestamp": timeutils.utcnow().isoformat(),
                     "last_modified": None,
                     "hash": u"object_hash_2"}
    object_dict_3 = {"name": u"test,object_three%\u6346",
                     "content_type": u"text/plain",
                     "bytes": 128,
                     "timestamp": timeutils.utcnow().isoformat(),
                     "last_modified": None,
                     "hash": u"object_hash"}
    object_dict_4 = {"name": u"test folder%\u6346/test.txt",
                     "content_type": u"text/plain",
                     "bytes": 128,
                     "timestamp": timeutils.utcnow().isoformat(),
                     "last_modified": None,
                     "hash": u"object_hash"}
    obj_dicts = [object_dict, object_dict_2, object_dict_3, object_dict_4]
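    # Shared byte payload attached to each StorageObject created below.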
    obj_data = b"Fake Data"

    for obj_dict in obj_dicts:
        swift_object = swift.StorageObject(obj_dict,
                                           container_1.name,
                                           data=obj_data)
        TEST.objects.add(swift_object)

    folder_dict = {"subdir": u"test folder%\u6346/"}

    TEST.folder.add(swift.PseudoFolder(folder_dict, container_1.name))

    # when the folder is returned as part of a prefix match, this content
    # is returned by Swift instead:
    folder_dict_alt = {
        "name": u"test folder%\u6346/",
        "bytes": 0,
        "last_modified": timeutils.utcnow().isoformat(),
        "content_type": u"application/octet-stream",
        "hash": u"object_hash"
    }
    TEST.folder_alt.add(swift.PseudoFolder(folder_dict_alt, container_1.name))

    # just the objects matching the folder prefix
    TEST.subfolder.add(swift.PseudoFolder(folder_dict_alt, container_1.name))
    TEST.subfolder.add(swift.StorageObject(object_dict_4, container_1.name,
                                           data=obj_data))
Example No. 7
def data(TEST):
    TEST.cinder_services = utils.TestDataContainer()
    TEST.cinder_volumes = utils.TestDataContainer()
    TEST.cinder_volume_backups = utils.TestDataContainer()
    TEST.cinder_volume_encryption_types = utils.TestDataContainer()
    TEST.cinder_volume_types = utils.TestDataContainer()
    TEST.cinder_type_access = utils.TestDataContainer()
    TEST.cinder_volume_encryption = utils.TestDataContainer()
    TEST.cinder_bootable_volumes = utils.TestDataContainer()
    TEST.cinder_qos_specs = utils.TestDataContainer()
    TEST.cinder_qos_spec_associations = utils.TestDataContainer()
    TEST.cinder_volume_snapshots = utils.TestDataContainer()
    TEST.cinder_extensions = utils.TestDataContainer()
    TEST.cinder_quotas = utils.TestDataContainer()
    TEST.cinder_quota_usages = utils.TestDataContainer()
    TEST.cinder_availability_zones = utils.TestDataContainer()
    TEST.cinder_volume_transfers = utils.TestDataContainer()
    TEST.cinder_pools = utils.TestDataContainer()
    TEST.cinder_groups = utils.TestDataContainer()
    TEST.cinder_group_types = utils.TestDataContainer()
    TEST.cinder_group_snapshots = utils.TestDataContainer()
    TEST.cinder_group_volumes = utils.TestDataContainer()
    TEST.cinder_volume_snapshots_with_groups = utils.TestDataContainer()

    # Services
    service_1 = services.Service(
        services.ServiceManager(None), {
            "service": "cinder-scheduler",
            "status": "enabled",
            "binary": "cinder-scheduler",
            "zone": "internal",
            "state": "up",
            "updated_at": "2013-07-08T05:21:00.000000",
            "host": "devstack001",
            "disabled_reason": None
        })

    service_2 = services.Service(
        services.ServiceManager(None), {
            "service": "cinder-volume",
            "status": "enabled",
            "binary": "cinder-volume",
            "zone": "nova",
            "state": "up",
            "updated_at": "2013-07-08T05:20:51.000000",
            "host": "devstack001",
            "disabled_reason": None
        })
    TEST.cinder_services.add(service_1)
    TEST.cinder_services.add(service_2)

    # Volumes - Cinder v1
    volume = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
            'status': 'available',
            'size': 40,
            'name': 'Volume name',
            'display_description': 'Volume description',
            'created_at': '2014-01-27 10:30:00',
            'volume_type': None,
            'attachments': []
        })
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            "id": "4b069dd0-6eaa-4272-8abc-5448a68f1cce",
            "status": 'available',
            "size": 10,
            "name": '',
            "display_description": '',
            "device": "/dev/hda",
            "created_at": '2010-11-21 18:34:25',
            "volume_type": 'vol_type_1',
            "attachments": []
        })
    other_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "21023e92-8008-1234-8059-7f2293ff3889",
            'status': 'in-use',
            'size': 10,
            'name': u'my_volume',
            'display_description': '',
            'created_at': '2013-04-01 10:30:00',
            'volume_type': None,
            'attachments': [{
                "id": "1",
                "server_id": '1',
                "device": "/dev/hda"
            }]
        })
    volume_with_type = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "7dcb47fd-07d9-42c2-9647-be5eab799ebe",
            'name': u'my_volume2',
            'status': 'in-use',
            'size': 10,
            'display_description': '',
            'created_at': '2013-04-01 10:30:00',
            'volume_type': 'vol_type_2',
            'attachments': [{
                "id": "2",
                "server_id": '2',
                "device": "/dev/hdb"
            }]
        })
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "21023e92-8008-1234-8059-7f2293ff3890",
            'status': 'in-use',
            'size': 10,
            'name': u'my_volume',
            'display_description': '',
            'created_at': '2013-04-01 10:30:00',
            'volume_type': None,
            'bootable': False,
            'attachments': [{
                "id": "1",
                "server_id": '1',
                "device": "/dev/hda"
            }]
        })

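    # Cinder reports 'bootable' as a string, so the fixtures use 'true'
    # rather than a boolean.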
    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    other_volume.bootable = 'true'

    TEST.cinder_volumes.add(api.cinder.Volume(volume))
    TEST.cinder_volumes.add(api.cinder.Volume(nameless_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(other_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(volume_with_type))

    TEST.cinder_bootable_volumes.add(api.cinder.Volume(non_bootable_volume))

    vol_type1 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None), {
            'id': u'1',
            'name': u'vol_type_1',
            'description': 'type 1 description',
            'extra_specs': {
                'foo': 'bar',
                'volume_backend_name': 'backend_1'
            }
        })
    vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None), {
        'id': u'2',
        'name': u'vol_type_2',
        'description': 'type 2 description'
    })
    vol_type3 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None), {
            'id': u'3',
            'name': u'vol_type_3',
            'is_public': False,
            'description': 'type 3 description'
        })
    TEST.cinder_volume_types.add(vol_type1, vol_type2, vol_type3)
    vol_type_access1 = volume_type_access.VolumeTypeAccess(
        volume_type_access.VolumeTypeAccessManager(None), {
            'volume_type_id': u'1',
            'project_id': u'1'
        })
    TEST.cinder_type_access.add(vol_type_access1)

    # Volumes - Cinder v2
    volume_v2 = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "31023e92-8008-4c8b-8059-7f2293ff1234",
            'name': 'v2_volume',
            'description': "v2 Volume Description",
            'status': 'available',
            'size': 20,
            'created_at': '2014-01-27 10:30:00',
            'volume_type': None,
            'os-vol-host-attr:host': 'host@backend-name#pool',
            'bootable': 'true',
            'attachments': []
        })
    volume_v2.bootable = 'true'

    TEST.cinder_volumes.add(api.cinder.Volume(volume_v2))

    snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
            'display_name': 'test snapshot',
            'display_description': 'volume snapshot',
            'size': 40,
            'status': 'available',
            'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'
        })
    snapshot2 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0f',
            'name': '',
            'description': 'v2 volume snapshot description',
            'size': 80,
            'status': 'available',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })
    snapshot3 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0e',
            'name': '',
            'description': 'v2 volume snapshot description 2',
            'size': 80,
            'status': 'available',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })
    snapshot4 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': 'cd6be1eb-82ca-4587-8036-13c37c00c2b1',
            'name': '',
            'description': 'v2 volume snapshot with metadata description',
            'size': 80,
            'status': 'available',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234',
            'metadata': {
                'snapshot_meta_key': 'snapshot_meta_value'
            }
        })

    snapshot.bootable = 'true'
    snapshot2.bootable = 'true'

    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot2))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot3))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot4))
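    # Link the first snapshot back to its source volume so tests can follow
    # the relation.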
    TEST.cinder_volume_snapshots.first()._volume = volume

    # Volume Type Encryption
    vol_enc_type1 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {
            'volume_type_id': u'1',
            'control_location': "front-end",
            'key_size': 512,
            'provider': "a-provider",
            'cipher': "a-cipher"
        })
    vol_enc_type2 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {
            'volume_type_id': u'2',
            'control_location': "front-end",
            'key_size': 256,
            'provider': "a-provider",
            'cipher': "a-cipher"
        })
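    # An empty payload models a volume type with no encryption configured.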
    vol_unenc_type1 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {})
    TEST.cinder_volume_encryption_types.add(vol_enc_type1, vol_enc_type2,
                                            vol_unenc_type1)

    volume_backup1 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None), {
            'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
            'name': 'backup1',
            'description': 'volume backup 1',
            'size': 10,
            'status': 'available',
            'container_name': 'volumebackups',
            'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'
        })

    volume_backup2 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None), {
            'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e52',
            'name': 'backup2',
            'description': 'volume backup 2',
            'size': 20,
            'status': 'available',
            'container_name': 'volumebackups',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })

    volume_backup3 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None), {
            'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e53',
            'name': 'backup3',
            'description': 'volume backup 3',
            'size': 20,
            'status': 'available',
            'container_name': 'volumebackups',
            'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'
        })

    TEST.cinder_volume_backups.add(volume_backup1)
    TEST.cinder_volume_backups.add(volume_backup2)
    TEST.cinder_volume_backups.add(volume_backup3)

    # Volume Encryption
    vol_enc_metadata1 = volumes.Volume(
        volumes.VolumeManager(None), {
            'cipher': 'test-cipher',
            'key_size': 512,
            'provider': 'test-provider',
            'control_location': 'front-end'
        })
    vol_unenc_metadata1 = volumes.Volume(volumes.VolumeManager(None), {})
    TEST.cinder_volume_encryption.add(vol_enc_metadata1)
    TEST.cinder_volume_encryption.add(vol_unenc_metadata1)

    # v2 extensions

    extensions = [
        {
            'alias': 'os-services',
            'description': 'Services support.',
            'links': '[]',
            'name': 'Services',
            'updated': '2012-10-28T00:00:00-00:00'
        },
        {
            'alias': 'os-admin-actions',
            'description': 'Enable admin actions.',
            'links': '[]',
            'name': 'AdminActions',
            'updated': '2012-08-25T00:00:00+00:00'
        },
        {
            'alias': 'os-volume-transfer',
            'description': 'Volume transfer management support.',
            'links': '[]',
            'name': 'VolumeTransfer',
            'updated': '2013-05-29T00:00:00+00:00'
        },
    ]
    extensions = [
        cinder_list_extensions.ListExtResource(
            cinder_list_extensions.ListExtManager(None), ext)
        for ext in extensions
    ]
    TEST.cinder_extensions.add(*extensions)

    # Quota Sets
    quota_data = dict(volumes='1', snapshots='1', gigabytes='1000')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.cinder_quotas.add(api.base.QuotaSet(quota))

    # Quota Usages
    quota_usage_data = {
        'gigabytes': {
            'used': 0,
            'quota': 1000
        },
        'volumes': {
            'used': 0,
            'quota': 10
        },
        'snapshots': {
            'used': 0,
            'quota': 10
        }
    }
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(api.base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.cinder_quota_usages.add(quota_usage)

    # Availability Zones
    # Cinder returns the following structure from os-availability-zone
    # {"availabilityZoneInfo":
    # [{"zoneState": {"available": true}, "zoneName": "nova"}]}
    # Note that the default zone is still "nova" even though this is cinder
    TEST.cinder_availability_zones.add(
        availability_zones.AvailabilityZone(
            availability_zones.AvailabilityZoneManager(None), {
                'zoneName': 'nova',
                'zoneState': {
                    'available': True
                }
            }))
    # Cinder Limits
    limits = {
        "absolute": {
            "totalVolumesUsed": 4,
            "totalGigabytesUsed": 400,
            'totalSnapshotsUsed': 3,
            "maxTotalVolumes": 20,
            "maxTotalVolumeGigabytes": 1000,
            'maxTotalSnapshots': 10,
        }
    }

    TEST.cinder_limits = limits

    # QOS Specs
    qos_spec1 = qos_specs.QoSSpecs(
        qos_specs.QoSSpecsManager(None), {
            "id": "418db45d-6992-4674-b226-80aacad2073c",
            "name": "high_iops",
            "consumer": "back-end",
            "specs": {
                "minIOPS": "1000",
                "maxIOPS": '100000'
            }
        })
    qos_spec2 = qos_specs.QoSSpecs(
        qos_specs.QoSSpecsManager(None), {
            "id": "6ed7035f-992e-4075-8ed6-6eff19b3192d",
            "name": "high_bws",
            "consumer": "back-end",
            "specs": {
                "maxBWS": '5000'
            }
        })

    TEST.cinder_qos_specs.add(qos_spec1, qos_spec2)
    vol_type1.associated_qos_spec = qos_spec1.name
    TEST.cinder_qos_spec_associations.add(vol_type1)

    # volume_transfers
    transfer_1 = volume_transfers.VolumeTransfer(
        volume_transfers.VolumeTransferManager(None), {
            'id': '99999999-8888-7777-6666-555555555555',
            'name': 'test transfer',
            'volume_id': volume.id,
            'auth_key': 'blah',
            'created_at': ''
        })
    TEST.cinder_volume_transfers.add(transfer_1)

    # Pools
    pool1 = pools.Pool(
        pools.PoolManager(None), {
            "QoS_support": False,
            "allocated_capacity_gb": 0,
            "driver_version": "3.0.0",
            "free_capacity_gb": 10,
            "extra_specs": {
                "description": "LVM Extra specs",
                "display_name": "LVMDriver",
                "namespace": "OS::Cinder::LVMDriver",
                "type": "object",
            },
            "name": "devstack@lvmdriver-1#lvmdriver-1",
            "pool_name": "lvmdriver-1",
            "reserved_percentage": 0,
            "storage_protocol": "iSCSI",
            "total_capacity_gb": 10,
            "vendor_name": "Open Source",
            "volume_backend_name": "lvmdriver-1"
        })

    pool2 = pools.Pool(
        pools.PoolManager(None), {
            "QoS_support": False,
            "allocated_capacity_gb": 2,
            "driver_version": "3.0.0",
            "free_capacity_gb": 5,
            "extra_specs": {
                "description": "LVM Extra specs",
                "display_name": "LVMDriver",
                "namespace": "OS::Cinder::LVMDriver",
                "type": "object",
            },
            "name": "devstack@lvmdriver-2#lvmdriver-2",
            "pool_name": "lvmdriver-2",
            "reserved_percentage": 0,
            "storage_protocol": "iSCSI",
            "total_capacity_gb": 10,
            "vendor_name": "Open Source",
            "volume_backend_name": "lvmdriver-2"
        })

    TEST.cinder_pools.add(pool1)
    TEST.cinder_pools.add(pool2)

    group_type_1 = group_types.GroupType(
        group_types.GroupTypeManager(None), {
            "is_public": True,
            "group_specs": {},
            "id": "4645cbf7-8aa6-4d42-a5f7-24e6ebe5ba79",
            "name": "group-type-1",
            "description": None,
        })
    TEST.cinder_group_types.add(group_type_1)

    group_1 = groups.Group(
        groups.GroupManager(None), {
            "availability_zone": "nova",
            "created_at": "2018-01-09T07:27:22.000000",
            "description": "description for group1",
            "group_snapshot_id": None,
            "group_type": group_type_1.id,
            "id": "f64646ac-9bf7-483f-bd85-96c34050a528",
            "name": "group1",
            "replication_status": "disabled",
            "source_group_id": None,
            "status": "available",
            "volume_types": [
                vol_type1.id,
            ]
        })
    TEST.cinder_groups.add(cinder_api.Group(group_1))

    group_snapshot_1 = group_snapshots.GroupSnapshot(
        group_snapshots.GroupSnapshotManager(None), {
            "created_at": "2018-01-09T07:46:03.000000",
            "description": "",
            "group_id": group_1.id,
            "group_type_id": group_type_1.id,
            "id": "1036d913-9cb8-46a1-9f56-2f99dc1f14ed",
            "name": "group-snap1",
            "status": "available",
        })
    TEST.cinder_group_snapshots.add(group_snapshot_1)

    group_volume_1 = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "fe9a2664-0f49-4354-bab6-11b2ad352630",
            'status': 'available',
            'size': 2,
            'name': 'group1-volume1',
            'display_description': 'Volume 1 in Group 1',
            'created_at': '2014-01-27 10:30:00',
            'volume_type': 'vol_type_1',
            'group_id': group_1.id,
            'attachments': []
        })

    group_volume_2 = volumes.Volume(
        volumes.VolumeManager(None), {
            'id': "a7fb0402-88dc-45a3-970c-d732da63466e",
            'status': 'available',
            'size': 1,
            'name': 'group1-volume2',
            'display_description': 'Volume 2 in Group 1',
            'created_at': '2014-01-30 10:31:00',
            'volume_type': 'vol_type_1',
            'group_id': group_1.id,
            'attachments': []
        })
    TEST.cinder_group_volumes.add(group_volume_1)
    TEST.cinder_group_volumes.add(group_volume_2)

    snapshot5 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None), {
            'id': 'cd6be1eb-82ca-4587-8036-13c37c00c2b1',
            'name': '',
            'description': 'v2 volume snapshot with metadata description',
            'size': 80,
            'status': 'available',
            'volume_id': '7e4efa56-9ca1-45ff-b83c-2efb2383930d',
            'metadata': {
                'snapshot_meta_key': 'snapshot_meta_value'
            },
            'group_snapshot_id': group_snapshot_1.id
        })

    TEST.cinder_volume_snapshots_with_groups.add(
        api.cinder.VolumeSnapshot(snapshot5))
Example No. 8
def data(TEST):
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()
    TEST.server_groups = utils.TestDataContainer()

    server_group = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        dict(
            id="41023e92-8008-4c8b-8059-7f2293ff3775",
            name='test',
            policies=['test'],
        ))
    TEST.server_groups.add(server_group)

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None), {
            "id": "41023e92-8008-4c8b-8059-7f2293ff3775",
            "name": 'test_volume',
            "status": 'available',
            "size": 40,
            "display_name": 'Volume name',
            "created_at": '2012-04-01 10:30:00',
            "volume_type": None,
            "attachments": []
        })
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            "id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
            "name": '',
            "status": 'in-use',
            "size": 10,
            "display_name": '',
            "display_description": '',
            "device": "/dev/hda",
            "created_at": '2010-11-21 18:34:25',
            "volume_type": 'vol_type_1',
            "attachments": [{
                "id": "1",
                "server_id": '1',
                "device": "/dev/hda"
            }]
        })
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            "id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
            "name": 'my_volume',
            "status": 'in-use',
            "size": 30,
            "display_name": 'My Volume',
            "display_description": '',
            "device": "/dev/hdk",
            "created_at": '2011-05-01 11:54:33',
            "volume_type": 'vol_type_2',
            "attachments": [{
                "id": "2",
                "server_id": '1',
                "device": "/dev/hdk"
            }]
        })
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None), {
            "id": "41023e92-8008-4c8b-8059-7f2293ff3771",
            "name": 'non_bootable_volume',
            "status": 'available',
            "size": 40,
            "display_name": 'Non Bootable Volume',
            "created_at": '2012-04-01 10:30:00',
            "volume_type": None,
            "attachments": []
        })

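    # Cinder reports 'bootable' as a string flag ('true'/'false'), so it is
    # set on the volume objects after construction.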
    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    # Flavors
    flavor_1 = flavors.Flavor(
        flavors.FlavorManager(None), {
            'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            'name': 'm1.tiny',
            'vcpus': 1,
            'disk': 0,
            'ram': 512,
            'swap': 0,
            'rxtx_factor': 1,
            'extra_specs': {},
            'os-flavor-access:is_public': True,
            'OS-FLV-EXT-DATA:ephemeral': 0
        })
    flavor_2 = flavors.Flavor(
        flavors.FlavorManager(None), {
            'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
            'name': 'm1.massive',
            'vcpus': 100,
            'disk': 1024,
            'ram': 10000,
            'swap': 0,
            'rxtx_factor': 1,
            'extra_specs': {
                'Trusted': True,
                'foo': 'bar'
            },
            'os-flavor-access:is_public': True,
            'OS-FLV-EXT-DATA:ephemeral': 2048
        })
    flavor_3 = flavors.Flavor(
        flavors.FlavorManager(None), {
            'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
            'name': 'm1.secret',
            'vcpus': 1000,
            'disk': 1024,
            'ram': 10000,
            'swap': 0,
            'rxtx_factor': 1,
            'extra_specs': {},
            'os-flavor-access:is_public': False,
            'OS-FLV-EXT-DATA:ephemeral': 2048
        })
    flavor_4 = flavors.Flavor(
        flavors.FlavorManager(None), {
            'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
            'name': 'm1.metadata',
            'vcpus': 1000,
            'disk': 1024,
            'ram': 10000,
            'swap': 0,
            'rxtx_factor': 1,
            'extra_specs': FlavorExtraSpecs({
                'key': 'key_mock',
                'value': 'value_mock'
            }),
            'os-flavor-access:is_public': False,
            'OS-FLV-EXT-DATA:ephemeral': 2048
        })
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager, {
            "tenant_id": "1",
            "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"
        })
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager, {
            "tenant_id": "2",
            "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"
        })
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      server_groups='10',
                      server_group_members='10',
                      security_groups='10',
                      security_group_rules='20',
                      key_pairs=100,
                      injected_file_path_bytes=255)
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = {
        'floating_ips', 'fixed_ips', 'security_groups', 'security_group_rules'
    }
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {
        'gigabytes': {
            'used': 0,
            'quota': 1000
        },
        'instances': {
            'used': 0,
            'quota': 10
        },
        'ram': {
            'used': 0,
            'quota': 10000
        },
        'cores': {
            'used': 0,
            'quota': 20
        },
        'floating_ips': {
            'used': 0,
            'quota': 10
        },
        'security_groups': {
            'used': 0,
            'quota': 10
        },
        'volumes': {
            'used': 0,
            'quota': 10
        }
    }
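    # Build the QuotaUsage object: register each limit via add_quota() and
    # record the amount currently in use via tally().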
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {
        "absolute": {
            "maxImageMeta": 128,
            "maxPersonality": 5,
            "maxPersonalitySize": 10240,
            "maxSecurityGroupRules": 20,
            "maxSecurityGroups": 10,
            "maxServerMeta": 128,
            "maxTotalCores": 20,
            "maxTotalFloatingIps": 10,
            "maxTotalInstances": 10,
            "maxTotalKeypairs": 100,
            "maxTotalRAMSize": 10000,
            "totalCoresUsed": 0,
            "totalInstancesUsed": 0,
            "totalKeyPairsUsed": 0,
            "totalRAMUsed": 0,
            "totalSecurityGroupsUsed": 0
        }
    }
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {
        "host": "http://nova.example.com:8774",
        "name": "server_1",
        "status": "ACTIVE",
        "tenant_id": TEST.tenants.first().id,
        "user_id": TEST.user.id,
        "server_id": "1",
        "flavor_id": flavor_1.id,
        "image_id": TEST.images.first().id,
        "key_name": keypair.name
    }
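    # SERVER_DATA (a JSON template defined elsewhere in this module) is
    # filled in with vals and parsed to build each Server object.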
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2", "status": "BUILD", "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({
        "name": "server_3",
        "status": "ACTIVE",
        "tenant_id": tenant3.id,
        "server_id": "3"
    })
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_4", "status": "PAUSED", "server_id": "4"})
    server_4 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3, server_4)

    # VNC Console Data
    console = {
        u'console': {
            u'url': u'http://example.com:6080/vnc_auto.html',
            u'type': u'novnc'
        }
    }
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {
        u'console': {
            u'url': u'http://example.com:6080/spice_auto.html',
            u'type': u'spice'
        }
    }
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {
        u'console': {
            u'url': u'http://example.com:6080/rdp_auto.html',
            u'type': u'rdp'
        }
    }
    TEST.servers.rdp_console_data = console

    # Usage
    usage_vals = {
        "tenant_id": TEST.tenant.id,
        "instance_name": server_1.name,
        "flavor_name": flavor_1.name,
        "flavor_vcpus": flavor_1.vcpus,
        "flavor_disk": flavor_1.disk,
        "flavor_ram": flavor_1.ram
    }
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {
        "tenant_id": tenant3.id,
        "instance_name": server_3.name,
        "flavor_name": flavor_1.name,
        "flavor_vcpus": flavor_1.vcpus,
        "flavor_disk": flavor_1.disk,
        "flavor_ram": flavor_1.ram
    }
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    cert_data = {'private_key': 'private', 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(
        availability_zones.AvailabilityZone(
            availability_zones.AvailabilityZoneManager(None),
            {
                'zoneName': 'nova',
                'zoneState': {
                    'available': True
                },
                'hosts': {
                    "host001": {
                        "nova-network": {
                            "active": True,
                            "available": True,
                        },
                    },
                },
            },
        ))

    # Hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {
                "host": "devstack001",
                "id": 3
            },
            "vcpus_used": 1,
            "vcpus_by_node": '{"0": 1}',
            "vcpus_used_by_node": '{"0": {"shared": 0, "dedicated": 1}}',
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb_used_by_node": '{"0": {"4k": 0, "2M": 750, "1G": 0}}',
            "memory_mb": 2000,
            "memory_mb_by_node": '{"0": {"4k": 0, "2M": 1000, "1G": 0}}',
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": {
                "vendor": "Intel",
                "model": "core2duo",
                "arch": "x86_64",
                "features": ["lahf_lm", "rdtscp"],
                "topology": {
                    "cores": 1,
                    "threads": 1,
                    "sockets": 1
                }
            },
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
            "servers": [{
                "name": "test_name",
                "uuid": "test_uuid"
            }]
        },
    )

    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {
                "host": "devstack002",
                "id": 4
            },
            "vcpus_used": 1,
            "vcpus_by_node": '{"0": 1}',
            "vcpus_used_by_node": '{"0": {"shared": 0, "dedicated": 1}}',
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb_used_by_node": '{"0": {"4k": 0, "2M": 750, "1G": 0}}',
            "memory_mb": 2000,
            "memory_mb_by_node": '{"0": {"4k": 0, "2M": 1000, "1G": 0}}',
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": {
                "vendor": "Intel",
                "model": "core2duo",
                "arch": "x86_64",
                "features": ["lahf_lm", "rdtscp"],
                "topology": {
                    "cores": 1,
                    "threads": 1,
                    "sockets": 1
                }
            },
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
            "servers": [{
                "name": "test_name_2",
                "uuid": "test_uuid_2"
            }]
        },
    )
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {
                "host": "instance-host",
                "id": 5
            },
            "vcpus_used": 1,
            "vcpus_by_node": '{"0": 1}',
            "vcpus_used_by_node": '{"0": {"shared": 0, "dedicated": 1}}',
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb_used_by_node": '{"0": {"4k": 0, "2M": 750, "1G": 0}}',
            "memory_mb": 2000,
            "memory_mb_by_node": '{"0": {"4k": 0, "2M": 1000, "1G": 0}}',
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": {
                "vendor": "Intel",
                "model": "core2duo",
                "arch": "x86_64",
                "features": ["lahf_lm", "rdtscp"],
                "topology": {
                    "cores": 1,
                    "threads": 1,
                    "sockets": 1
                }
            },
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

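    # Aggregate statistics across all hypervisors, mirroring what Nova's
    # hypervisor statistics API returns.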
    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(
        services.ServiceManager(None), {
            "status": "enabled",
            "binary": "nova-conductor",
            "zone": "internal",
            "state": "up",
            "updated_at": "2013-07-08T05:21:00.000000",
            "host": "devstack001",
            "disabled_reason": None,
        })

    service_2 = services.Service(
        services.ServiceManager(None), {
            "status": "enabled",
            "binary": "nova-compute",
            "zone": "nova",
            "state": "up",
            "updated_at": "2013-07-08T05:20:51.000000",
            "host": "devstack001",
            "disabled_reason": None,
        })

    service_3 = services.Service(
        services.ServiceManager(None), {
            "status": "enabled",
            "binary": "nova-compute",
            "zone": "nova",
            "state": "down",
            "updated_at": "2013-07-08T04:20:51.000000",
            "host": "devstack002",
            "disabled_reason": None,
        })

    service_4 = services.Service(
        services.ServiceManager(None), {
            "status": "disabled",
            "binary": "nova-compute",
            "zone": "nova",
            "state": "up",
            "updated_at": "2013-07-08T04:20:51.000000",
            "host": "devstack003",
            "disabled_reason": None,
        })

    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)
    TEST.services.add(service_4)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(
        aggregates.AggregateManager(None), {
            "name": "foo",
            "availability_zone": "testing",
            "deleted": 0,
            "created_at": "2013-07-04T13:34:38.000000",
            "updated_at": None,
            "hosts": ["foo", "bar"],
            "deleted_at": None,
            "id": 1,
            "metadata": {
                "foo": "testing",
                "bar": "testing"
            },
        })

    aggregate_2 = aggregates.Aggregate(
        aggregates.AggregateManager(None), {
            "name": "bar",
            "availability_zone": "testing",
            "deleted": 0,
            "created_at": "2013-07-04T13:34:38.000000",
            "updated_at": None,
            "hosts": ["foo", "bar"],
            "deleted_at": None,
            "id": 2,
            "metadata": {
                "foo": "testing",
                "bar": "testing"
            },
        })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })

    host2 = hosts.Host(
        hosts.HostManager(None), {
            "host_name": "devstack002",
            "service": "nova-conductor",
            "zone": "testing",
        })

    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })

    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })

    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)

    server_group_1 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "1",
            "name": "server_group_1",
            "policies": [],
        },
    )

    server_group_2 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "2",
            "name": "server_group_2",
            "policies": ["affinity", "some_other_policy"],
        },
    )

    server_group_3 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "3",
            "name": "server_group_3",
            "policies": ["anti-affinity", "some_other_policy"],
        },
    )

    TEST.server_groups.add(server_group_1)
    TEST.server_groups.add(server_group_2)
    TEST.server_groups.add(server_group_3)
Exemplo n.º 9
0
def data(TEST):
    TEST.stacks = utils.TestDataContainer()
    TEST.stack_templates = utils.TestDataContainer()
    TEST.stack_environments = utils.TestDataContainer()
    TEST.stack_snapshot_create = utils.TestDataContainer()
    TEST.stack_snapshot = utils.TestDataContainer()
    TEST.resource_types = utils.TestDataContainer()
    TEST.heat_resources = utils.TestDataContainer()
    TEST.heat_services = utils.TestDataContainer()
    TEST.template_versions = utils.TestDataContainer()
    TEST.template_functions = utils.TestDataContainer()

    # Services
    service_1 = services.Service(
        services.ServiceManager(None), {
            "status": "up",
            "binary": "heat-engine",
            "report_interval": 60,
            "engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d1",
            "created_at": "2015-02-06T03:23:32.000000",
            "hostname": "mrkanag",
            "updated_at": "2015-02-20T09:49:52.000000",
            "topic": "engine",
            "host": "engine-1",
            "deleted_at": None,
            "id": "1efd7015-5016-4caa-b5c8-12438af7b100"
        })

    service_2 = services.Service(
        services.ServiceManager(None), {
            "status": "up",
            "binary": "heat-engine",
            "report_interval": 60,
            "engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d2",
            "created_at": "2015-02-06T03:23:32.000000",
            "hostname": "mrkanag",
            "updated_at": "2015-02-20T09:49:52.000000",
            "topic": "engine",
            "host": "engine-2",
            "deleted_at": None,
            "id": "1efd7015-5016-4caa-b5c8-12438af7b100"
        })

    TEST.heat_services.add(service_1)
    TEST.heat_services.add(service_2)

    # Data returned by heatclient.
    TEST.api_resource_types = utils.TestDataContainer()

    for i in range(10):
        stack_data = {
            "description": "No description",
            "links": [{
                "href": "http://192.168.1.70:8004/v1/"
                        "051c727ee67040d6a7b7812708485a97/"
                        "stacks/stack-test{0}/"
                        "05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i),
                "rel": "self"
            }],
            "parameters": {
                'DBUsername': '******',
                'InstanceType': 'm1.small',
                'AWS::StackId':
                    'arn:openstack:heat::2ce287:stacks/teststack/88553ec',
                'DBRootPassword': '******',
                'AWS::StackName': "teststack{0}".format(i),
                'DBPassword': '******',
                'AWS::Region': 'ap-southeast-1',
                'DBName': u'wordpress'
            },
            "stack_status_reason": "Stack successfully created",
            "stack_name": "stack-test{0}".format(i),
            "creation_time": "2013-04-22T00:11:39Z",
            "updated_time": "2013-04-22T00:11:39Z",
            "stack_status": "CREATE_COMPLETE",
            "id": "05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i)
        }
        stack = stacks.Stack(stacks.StackManager(None), stack_data)
        TEST.stacks.add(stack)

    for i in range(10):
        snapshot_data = {
            "status": "COMPLETE",
            "name": 'null',
            "data": {
                "files": {},
                "status": "COMPLETE",
                "name": "zhao3",
                "tags": ["a", " 123", " b", " 456"],
                "stack_user_project_id": "3cba4460875444049a2a7cc5420ccddb",
                "environment": {
                    "encrypted_param_names": [],
                    "parameter_defaults": {},
                    "event_sinks": [],
                    "parameters": {},
                    "resource_registry": {
                        "resources": {}
                    }
                },
                "template": {
                    "heat_template_version": "2013-05-23",
                    "description": "HOT template for Test.",
                    "resources": {
                        "private_subnet": {
                            "type": "OS::Neutron::Subnet",
                            "properties": {
                                "network_id": {
                                    "get_resource": "private_net"
                                },
                                "cidr": "172.16.2.0/24",
                                "gateway_ip": "172.16.2.1"
                            }
                        },
                        "private_net": {
                            "type": "OS::Neutron::Net",
                            "properties": {
                                "name": "private-net"
                            }
                        }
                    }
                },
                "action": "SNAPSHOT",
                "project_id": "1acd0026829f4d28bb2eff912d7aad0d",
                "id": "70650725-bdbd-419f-b53f-5707767bfe0e",
                "resources": {
                    "private_subnet": {
                        "status": "COMPLETE",
                        "name": "private_subnet",
                        "resource_data": {},
                        "resource_id": "9c7211b3-31c7-41f6-b92a-442ad3f71ef0",
                        "action": "SNAPSHOT",
                        "type": "OS::Neutron::Subnet",
                        "metadata": {}
                    },
                    "private_net": {
                        "status": "COMPLETE",
                        "name": "private_net",
                        "resource_data": {},
                        "resource_id": "ff4fd287-31b2-4d00-bc96-c409bc1db027",
                        "action": "SNAPSHOT",
                        "type": "OS::Neutron::Net",
                        "metadata": {}
                    }
                }
            },
            "creation_time": "2016-02-21T04:02:54",
            "status_reason": "Stack SNAPSHOT completed successfully",
            "id": "01558a3b-ba05-4427-bbb4-1e4ab71cfcad"
        }
        TEST.stack_snapshot.add(snapshot_data)

    TEST.stack_templates.add(Template(TEMPLATE, VALIDATE))
    TEST.stack_environments.add(Environment(ENVIRONMENT))
    TEST.stack_snapshot_create.add(Snapshot(SNAPSHOT_CREATE))

    # Resource types list
    r_type_1 = {
        "resource_type": "AWS::CloudFormation::Stack",
        "attributes": {},
        "properties": {
            "Parameters": {
                "description":
                "The set of parameters passed to this nested stack.",
                "immutable": False,
                "required": False,
                "type": "map",
                "update_allowed": True
            },
            "TemplateURL": {
                "description": "The URL of a template that specifies"
                " the stack to be created as a resource.",
                "immutable": False,
                "required": True,
                "type": "string",
                "update_allowed": True
            },
            "TimeoutInMinutes": {
                "description": "The length of time, in minutes,"
                " to wait for the nested stack creation.",
                "immutable": False,
                "required": False,
                "type": "number",
                "update_allowed": True
            }
        }
    }

    r_type_2 = {
        "resource_type": "OS::Heat::CloudConfig",
        "attributes": {
            "config": {
                "description": "The config value of the software config."
            }
        },
        "properties": {
            "cloud_config": {
                "description": "Map representing the cloud-config data"
                " structure which will be formatted as YAML.",
                "immutable": False,
                "required": False,
                "type": "map",
                "update_allowed": False
            }
        }
    }

    r_types_list = [r_type_1, r_type_2]

    for rt in r_types_list:
        r_type = resource_types.ResourceType(
            resource_types.ResourceTypeManager(None), rt['resource_type'])
        TEST.resource_types.add(r_type)
        TEST.api_resource_types.add(rt)

    # Resources
    resource_1 = resources.Resource(
        resources.ResourceManager(None), {
            "logical_resource_id": "my_resource",
            "physical_resource_id": "7b5e29b1-c94d-402d-b69c-df9ac6dfc0ce",
            "resource_name": "my_resource",
            "links": [{
                "href": "http://192.168.1.70:8004/v1/"
                        "051c727ee67040d6a7b7812708485a97/"
                        "stacks/%s/%s/resources/my_resource" %
                        (TEST.stacks.first().stack_name,
                         TEST.stacks.first().id),
                "rel": "self"
            }, {
                "href": "http://192.168.1.70:8004/v1/"
                        "051c727ee67040d6a7b7812708485a97/"
                        "stacks/%s/%s" %
                        (TEST.stacks.first().stack_name,
                         TEST.stacks.first().id),
                "rel": "stack"
            }],
            "attributes": {
                "metadata": {}
            }
        })

    TEST.heat_resources.add(resource_1)

    # Template versions
    template_version_1 = template_versions.TemplateVersion(
        template_versions.TemplateVersionManager(None), {
            "version": "HeatTemplateFormatVersion.2012-12-12",
            "type": "cfn"
        })

    template_version_2 = template_versions.TemplateVersion(
        template_versions.TemplateVersionManager(None), {
            "version": "heat_template_version.2013-05-23",
            "type": "hot"
        })

    TEST.template_versions.add(template_version_1)
    TEST.template_versions.add(template_version_2)

    # Template functions
    template_function_1 = template_versions.TemplateVersion(
        template_versions.TemplateVersionManager(None), {
            "functions": "Fn::GetAZs",
            "description": "A function for retrieving the availability zones."
        })

    template_function_2 = template_versions.TemplateVersion(
        template_versions.TemplateVersionManager(None), {
            "functions": "Fn::Join",
            "description": "A function for joining strings."
        })

    TEST.template_functions.add(template_function_1)
    TEST.template_functions.add(template_function_2)
Exemplo n.º 10
0
def data(TEST):
    # data returned by openstack_dashboard.api.neutron wrapper
    TEST.agents = utils.TestDataContainer()
    TEST.networks = utils.TestDataContainer()
    TEST.subnets = utils.TestDataContainer()
    TEST.ports = utils.TestDataContainer()
    TEST.routers = utils.TestDataContainer()
    TEST.routers_with_rules = utils.TestDataContainer()
    TEST.q_floating_ips = utils.TestDataContainer()
    TEST.q_secgroups = utils.TestDataContainer()
    TEST.q_secgroup_rules = utils.TestDataContainer()
    TEST.providers = utils.TestDataContainer()
    TEST.pools = utils.TestDataContainer()
    TEST.vips = utils.TestDataContainer()
    TEST.members = utils.TestDataContainer()
    TEST.monitors = utils.TestDataContainer()
    TEST.neutron_quotas = utils.TestDataContainer()
    TEST.net_profiles = utils.TestDataContainer()
    TEST.policy_profiles = utils.TestDataContainer()
    TEST.network_profile_binding = utils.TestDataContainer()
    TEST.policy_profile_binding = utils.TestDataContainer()
    TEST.vpnservices = utils.TestDataContainer()
    TEST.ikepolicies = utils.TestDataContainer()
    TEST.ipsecpolicies = utils.TestDataContainer()
    TEST.ipsecsiteconnections = utils.TestDataContainer()
    TEST.firewalls = utils.TestDataContainer()
    TEST.fw_policies = utils.TestDataContainer()
    TEST.fw_rules = utils.TestDataContainer()

    # data returned by neutronclient
    TEST.api_agents = utils.TestDataContainer()
    TEST.api_networks = utils.TestDataContainer()
    TEST.api_subnets = utils.TestDataContainer()
    TEST.api_ports = utils.TestDataContainer()
    TEST.api_routers = utils.TestDataContainer()
    TEST.api_q_floating_ips = utils.TestDataContainer()
    TEST.api_q_secgroups = utils.TestDataContainer()
    TEST.api_q_secgroup_rules = utils.TestDataContainer()
    TEST.api_pools = utils.TestDataContainer()
    TEST.api_vips = utils.TestDataContainer()
    TEST.api_members = utils.TestDataContainer()
    TEST.api_monitors = utils.TestDataContainer()
    TEST.api_extensions = utils.TestDataContainer()
    TEST.api_net_profiles = utils.TestDataContainer()
    TEST.api_policy_profiles = utils.TestDataContainer()
    TEST.api_network_profile_binding = utils.TestDataContainer()
    TEST.api_policy_profile_binding = utils.TestDataContainer()
    TEST.api_vpnservices = utils.TestDataContainer()
    TEST.api_ikepolicies = utils.TestDataContainer()
    TEST.api_ipsecpolicies = utils.TestDataContainer()
    TEST.api_ipsecsiteconnections = utils.TestDataContainer()
    TEST.api_firewalls = utils.TestDataContainer()
    TEST.api_fw_policies = utils.TestDataContainer()
    TEST.api_fw_rules = utils.TestDataContainer()

    #------------------------------------------------------------
    # 1st network
    network_dict = {
        'admin_state_up': True,
        'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
        'name': 'net1',
        'status': 'ACTIVE',
        'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'],
        'tenant_id': '1',
        'router:external': False,
        'shared': False
    }
    subnet_dict = {
        'allocation_pools': [{
            'end': '10.0.0.254',
            'start': '10.0.0.2'
        }],
        'dns_nameservers': [],
        'host_routes': [],
        'cidr': '10.0.0.0/24',
        'enable_dhcp': True,
        'gateway_ip': '10.0.0.1',
        'id': network_dict['subnets'][0],
        'ip_version': 4,
        'name': 'mysubnet1',
        'network_id': network_dict['id'],
        'tenant_id': network_dict['tenant_id']
    }

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    # network profile for network when using the cisco n1k plugin
    net_profile_dict = {
        'name': 'net_profile_test1',
        'segment_type': 'vlan',
        'physical_network': 'phys1',
        'segment_range': '3000-31000',
        'id': '00000000-1111-1111-1111-000000000000',
        'tenant_id': network_dict['tenant_id']
    }

    TEST.api_net_profiles.add(net_profile_dict)
    TEST.net_profiles.add(neutron.Profile(net_profile_dict))

    # policy profile for port when using the cisco n1k plugin
    policy_profile_dict = {
        'name': 'policy_profile_test1',
        'id': '00000000-9999-9999-9999-000000000000'
    }

    TEST.api_policy_profiles.add(policy_profile_dict)
    TEST.policy_profiles.add(neutron.Profile(policy_profile_dict))

    # network profile binding
    network_profile_binding_dict = {
        'profile_id': '00000000-1111-1111-1111-000000000000',
        'tenant_id': network_dict['tenant_id']
    }

    TEST.api_network_profile_binding.add(network_profile_binding_dict)
    TEST.network_profile_binding.add(
        neutron.Profile(network_profile_binding_dict))

    # policy profile binding
    policy_profile_binding_dict = {
        'profile_id': '00000000-9999-9999-9999-000000000000',
        'tenant_id': network_dict['tenant_id']
    }

    TEST.api_policy_profile_binding.add(policy_profile_binding_dict)
    TEST.policy_profile_binding.add(
        neutron.Profile(policy_profile_binding_dict))

    # ports on 1st network
    port_dict = {
        'admin_state_up': True,
        'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
        'device_owner': 'network:dhcp',
        'fixed_ips': [{
            'ip_address': '10.0.0.3',
            'subnet_id': subnet_dict['id']
        }],
        'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
        'mac_address': 'fa:16:3e:9c:d5:7e',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id']
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    port_dict = {
        'admin_state_up': True,
        'device_id': '1',
        'device_owner': 'compute:nova',
        'fixed_ips': [{
            'ip_address': '10.0.0.4',
            'subnet_id': subnet_dict['id']
        }],
        'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
        'mac_address': 'fa:16:3e:9d:e6:2f',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id']
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))
    assoc_port = port_dict
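    # Keep a reference to this compute port; a floating IP is associated
    # with it further below.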

    #------------------------------------------------------------
    # 2nd network
    network_dict = {
        'admin_state_up': True,
        'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
        'name': 'net2',
        'status': 'ACTIVE',
        'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
        'tenant_id': '2',
        'router:external': False,
        'shared': True
    }
    subnet_dict = {
        'allocation_pools': [{
            'end': '172.16.88.254',
            'start': '172.16.88.2'
        }],
        'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
        'host_routes': [{
            'destination': '192.168.20.0/24',
            'nexthop': '172.16.88.253'
        }, {
            'destination': '192.168.21.0/24',
            'nexthop': '172.16.88.252'
        }],
        'cidr': '172.16.88.0/24',
        'enable_dhcp': True,
        'gateway_ip': '172.16.88.1',
        'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
        'ip_version': 4,
        'name': 'aaaa',
        'network_id': network_dict['id'],
        'tenant_id': network_dict['tenant_id']
    }

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    port_dict = {
        'admin_state_up': True,
        'device_id': '2',
        'device_owner': 'compute:nova',
        'fixed_ips': [{
            'ip_address': '172.16.88.3',
            'subnet_id': subnet_dict['id']
        }],
        'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
        'mac_address': 'fa:16:3e:56:e6:2f',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': network_dict['tenant_id']
    }

    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    #------------------------------------------------------------
    # external network
    network_dict = {
        'admin_state_up': True,
        'id': '9b466b94-213a-4cda-badf-72c102a874da',
        'name': 'ext_net',
        'status': 'ACTIVE',
        'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
        'tenant_id': '3',
        'router:external': True,
        'shared': False
    }
    subnet_dict = {
        'allocation_pools': [{
            'start': '172.24.4.226',
            'end': '172.24.4.238'
        }],
        'dns_nameservers': [],
        'host_routes': [],
        'cidr': '172.24.4.0/28',
        'enable_dhcp': False,
        'gateway_ip': '172.24.4.225',
        'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
        'ip_version': 4,
        'name': 'ext_subnet',
        'network_id': network_dict['id'],
        'tenant_id': network_dict['tenant_id']
    }
    ext_net = network_dict
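    # Keep a handle on the external network; it is used below as the router
    # gateway network and as the floating IP network.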

    TEST.api_networks.add(network_dict)
    TEST.api_subnets.add(subnet_dict)

    network = copy.deepcopy(network_dict)
    subnet = neutron.Subnet(subnet_dict)
    network['subnets'] = [subnet]
    TEST.networks.add(neutron.Network(network))
    TEST.subnets.add(subnet)

    #------------------------------------------------------------
    # Set up router data
    port_dict = {
        'admin_state_up': True,
        'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
        'device_owner': 'network:router_gateway',
        'fixed_ips': [{
            'ip_address': '10.0.0.3',
            'subnet_id': subnet_dict['id']
        }],
        'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
        'mac_address': 'fa:16:3e:9c:d5:7e',
        'name': '',
        'network_id': network_dict['id'],
        'status': 'ACTIVE',
        'tenant_id': '1'
    }
    TEST.api_ports.add(port_dict)
    TEST.ports.add(neutron.Port(port_dict))

    router_dict = {
        'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
        'name': 'router1',
        'external_gateway_info': {
            'network_id': ext_net['id']
        },
        'tenant_id': '1'
    }
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))
    router_dict = {
        'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
        'name': 'router2',
        'external_gateway_info': {
            'network_id': ext_net['id']
        },
        'tenant_id': '1'
    }
    TEST.api_routers.add(router_dict)
    TEST.routers.add(neutron.Router(router_dict))
    router_dict = {
        'id': '71fb25e9-cd9f-4a44-a780-85ec3bd8bdd7',
        'name': 'rulerouter',
        'external_gateway_info': {
            'network_id': ext_net['id']
        },
        'tenant_id': '1',
        'router_rules': [{
            'id': '101',
            'action': 'deny',
            'source': 'any',
            'destination': 'any',
            'nexthops': []
        }, {
            'id': '102',
            'action': 'permit',
            'source': 'any',
            'destination': '8.8.8.8/32',
            'nexthops': ['1.0.0.2', '1.0.0.1']
        }]
    }
    TEST.api_routers.add(router_dict)
    TEST.routers_with_rules.add(neutron.Router(router_dict))

    #------------------------------------------------------------
    # floating IP
    # unassociated
    fip_dict = {
        'tenant_id': '1',
        'floating_ip_address': '172.16.88.227',
        'floating_network_id': ext_net['id'],
        'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
        'fixed_ip_address': None,
        'port_id': None,
        'router_id': None
    }
    TEST.api_q_floating_ips.add(fip_dict)
    TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))

    # associated (with compute port on 1st network)
    fip_dict = {
        'tenant_id': '1',
        'floating_ip_address': '172.16.88.228',
        'floating_network_id': ext_net['id'],
        'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
        'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
        'port_id': assoc_port['id'],
        'router_id': router_dict['id']
    }
    TEST.api_q_floating_ips.add(fip_dict)
    TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))

    #------------------------------------------------------------
    # security group

    sec_group_1 = {
        'tenant_id': '1',
        'description': 'default',
        'id': 'faad7c80-3b62-4440-967c-13808c37131d',
        'name': 'default'
    }
    sec_group_2 = {
        'tenant_id': '1',
        'description': 'NotDefault',
        'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
        'name': 'other_group'
    }
    sec_group_3 = {
        'tenant_id': '1',
        'description': 'NotDefault',
        'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
        'name': 'another_group'
    }

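    # Helper to attach rules to a security group dict: it always adds the two
    # default egress rules, and with default_only=False it also adds the
    # ingress TCP/ICMP/remote-group rules.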
    def add_rule_to_group(secgroup, default_only=True):
        rule_egress_ipv4 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress',
            'ethertype': u'IPv4',
            'port_range_min': None,
            'port_range_max': None,
            'protocol': None,
            'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']
        }
        rule_egress_ipv6 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress',
            'ethertype': u'IPv6',
            'port_range_min': None,
            'port_range_max': None,
            'protocol': None,
            'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']
        }

        rule_tcp_80 = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress',
            'ethertype': u'IPv4',
            'port_range_min': 80,
            'port_range_max': 80,
            'protocol': u'tcp',
            'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']
        }
        rule_icmp = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress',
            'ethertype': u'IPv4',
            'port_range_min': 5,
            'port_range_max': 8,
            'protocol': u'icmp',
            'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']
        }
        rule_group = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress',
            'ethertype': u'IPv4',
            'port_range_min': 80,
            'port_range_max': 80,
            'protocol': u'tcp',
            'remote_group_id': sec_group_1['id'],
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']
        }

        rules = []
        if not default_only:
            rules += [rule_tcp_80, rule_icmp, rule_group]
        rules += [rule_egress_ipv4, rule_egress_ipv6]
        secgroup['security_group_rules'] = rules

    add_rule_to_group(sec_group_1, default_only=False)
    add_rule_to_group(sec_group_2)
    add_rule_to_group(sec_group_3)

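    # Map of security group id -> name; the wrappers below use it to resolve
    # remote group references to names.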
    groups = [sec_group_1, sec_group_2, sec_group_3]
    sg_name_dict = dict([(sg['id'], sg['name']) for sg in groups])
    for sg in groups:
        # Neutron API
        TEST.api_q_secgroups.add(sg)
        for rule in sg['security_group_rules']:
            TEST.api_q_secgroup_rules.add(copy.copy(rule))
        # OpenStack Dashboard internal API
        TEST.q_secgroups.add(
            neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
        for rule in sg['security_group_rules']:
            TEST.q_secgroup_rules.add(
                neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))

    #------------------------------------------------------------
    # LBaaS

    # 1st pool
    pool_dict = {
        'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
        'tenant_id': '1',
        'vip_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'name': 'pool1',
        'description': 'pool description',
        'subnet_id': TEST.subnets.first().id,
        'protocol': 'HTTP',
        'lb_method': 'ROUND_ROBIN',
        'health_monitors': ['d4a0500f-db2b-4cc4-afcf-ec026febff96'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'provider': 'haproxy'
    }
    TEST.api_pools.add(pool_dict)
    TEST.pools.add(lbaas.Pool(pool_dict))

    # 1st vip
    vip_dict = {
        'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'name': 'vip1',
        'address': '10.0.0.100',
        'floatip_address': '',
        'other_address': '10.0.0.100',
        'description': 'vip description',
        'subnet_id': TEST.subnets.first().id,
        'subnet': TEST.subnets.first().cidr,
        'protocol_port': 80,
        'protocol': pool_dict['protocol'],
        'pool_id': pool_dict['id'],
        'session_persistence': {
            'type': 'APP_COOKIE',
            'cookie_name': 'jssessionid'
        },
        'connection_limit': 10,
        'admin_state_up': True
    }
    TEST.api_vips.add(vip_dict)
    TEST.vips.add(lbaas.Vip(vip_dict))

    # 2nd vip
    vip_dict = {
        'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
        'name': 'vip2',
        'address': '10.0.0.110',
        'floatip_address': '',
        'other_address': '10.0.0.110',
        'description': 'vip description',
        'subnet_id': TEST.subnets.first().id,
        'subnet': TEST.subnets.first().cidr,
        'protocol_port': 80,
        'protocol': pool_dict['protocol'],
        'pool_id': pool_dict['id'],
        'session_persistence': {
            'type': 'APP_COOKIE',
            'cookie_name': 'jssessionid'
        },
        'connection_limit': 10,
        'admin_state_up': True
    }
    TEST.api_vips.add(vip_dict)
    TEST.vips.add(lbaas.Vip(vip_dict))

    # 1st member
    member_dict = {
        'id': '78a46e5e-eb1a-418a-88c7-0e3f5968b08',
        'tenant_id': '1',
        'pool_id': pool_dict['id'],
        'address': '10.0.0.11',
        'protocol_port': 80,
        'weight': 10,
        'status': 'ACTIVE',
        'admin_state_up': True
    }
    TEST.api_members.add(member_dict)
    TEST.members.add(lbaas.Member(member_dict))

    # 2nd member
    member_dict = {
        'id': '41ac1f8d-6d9c-49a4-a1bf-41955e651f91',
        'tenant_id': '1',
        'pool_id': pool_dict['id'],
        'address': '10.0.0.12',
        'protocol_port': 80,
        'weight': 10,
        'status': 'ACTIVE',
        'admin_state_up': True
    }
    TEST.api_members.add(member_dict)
    TEST.members.add(lbaas.Member(member_dict))

    # 2nd pool
    pool_dict = {
        'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d50',
        'tenant_id': '1',
        'vip_id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
        'name': 'pool2',
        'description': 'pool description',
        'subnet_id': TEST.subnets.first().id,
        'protocol': 'HTTPS',
        'lb_method': 'ROUND_ROBIN',
        'health_monitors': ['d4a0500f-db2b-4cc4-afcf-ec026febff97'],
        'status': 'PENDING_CREATE',
        'admin_state_up': True
    }
    TEST.api_pools.add(pool_dict)
    TEST.pools.add(lbaas.Pool(pool_dict))

    # 1st monitor
    monitor_dict = {
        'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff96',
        'type': 'ping',
        'delay': 10,
        'timeout': 10,
        'max_retries': 10,
        'http_method': 'GET',
        'url_path': '/',
        'expected_codes': '200',
        'admin_state_up': True
    }
    TEST.api_monitors.add(monitor_dict)
    TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))

    # 2nd monitor
    monitor_dict = {
        'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff97',
        'type': 'ping',
        'delay': 10,
        'timeout': 10,
        'max_retries': 10,
        'http_method': 'GET',
        'url_path': '/',
        'expected_codes': '200',
        'admin_state_up': True
    }
    TEST.api_monitors.add(monitor_dict)
    TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))

    #------------------------------------------------------------
    # Quotas
    quota_data = {
        'network': '10',
        'subnet': '10',
        'port': '50',
        'router': '10',
        'floatingip': '50',
        'security_group': '20',
        'security_group_rule': '100',
    }
    TEST.neutron_quotas.add(base.QuotaSet(quota_data))

    #------------------------------------------------------------
    # Extensions
    extension_1 = {
        "name": "security-group",
        "alias": "security-group",
        "description": "The security groups extension."
    }
    extension_2 = {
        "name": "Quota management support",
        "alias": "quotas",
        "description": "Expose functions for quotas management"
    }
    TEST.api_extensions.add(extension_1)
    TEST.api_extensions.add(extension_2)

    #------------------------------------------------------------
    # 1st agent
    agent_dict = {
        "binary": "neutron-openvswitch-agent",
        "description": None,
        "admin_state_up": True,
        "heartbeat_timestamp": "2013-07-26 06:51:47",
        "alive": True,
        "id": "c876ff05-f440-443e-808c-1d34cda3e88a",
        "topic": "N/A",
        "host": "devstack001",
        "agent_type": "Open vSwitch agent",
        "started_at": "2013-07-26 05:23:28",
        "created_at": "2013-07-26 05:23:28",
        "configurations": {
            "devices": 2
        }
    }
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    # 2nd agent
    agent_dict = {
        "binary": "neutron-dhcp-agent",
        "description": None,
        "admin_state_up": True,
        "heartbeat_timestamp": "2013-07-26 06:51:48",
        "alive": True,
        "id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
        "topic": "dhcp_agent",
        "host": "devstack001",
        "agent_type": "DHCP agent",
        "started_at": "2013-07-26 05:23:30",
        "created_at": "2013-07-26 05:23:30",
        "configurations": {
            "subnets": 1,
            "use_namespaces": True,
            "dhcp_lease_duration": 120,
            "dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
            "networks": 1,
            "ports": 1
        }
    }
    TEST.api_agents.add(agent_dict)
    TEST.agents.add(neutron.Agent(agent_dict))

    #------------------------------------------------------------
    # Service providers
    provider_1 = {
        "service_type": "LOADBALANCER",
        "name": "haproxy",
        "default": True
    }
    TEST.providers.add(provider_1)

    #------------------------------------------------------------
    # VPNaaS

    # 1st VPNService
    vpnservice_dict = {
        'id': '09a26949-6231-4f72-942a-0c8c0ddd4d61',
        'tenant_id': '1',
        'name': 'cloud_vpn1',
        'description': 'vpn description',
        'subnet_id': TEST.subnets.first().id,
        'router_id': TEST.routers.first().id,
        'vpn_type': 'ipsec',
        'ipsecsiteconnections': [],
        'admin_state_up': True,
        'status': 'Active'
    }
    TEST.api_vpnservices.add(vpnservice_dict)
    TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))

    # 2nd VPNService
    vpnservice_dict = {
        'id': '09a26949-6231-4f72-942a-0c8c0ddd4d62',
        'tenant_id': '1',
        'name': 'cloud_vpn2',
        'description': 'vpn description',
        'subnet_id': TEST.subnets.first().id,
        'router_id': TEST.routers.first().id,
        'vpn_type': 'ipsec',
        'ipsecsiteconnections': [],
        'admin_state_up': True,
        'status': 'Active'
    }
    TEST.api_vpnservices.add(vpnservice_dict)
    TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))

    # 1st IKEPolicy
    ikepolicy_dict = {
        'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c981',
        'tenant_id': '1',
        'name': 'ikepolicy_1',
        'description': 'ikepolicy description',
        'auth_algorithm': 'sha1',
        'encryption_algorithm': 'aes-256',
        'ike_version': 'v1',
        'lifetime': {
            'units': 'seconds',
            'value': 3600
        },
        'phase1_negotiation_mode': 'main',
        'pfs': 'group5'
    }
    TEST.api_ikepolicies.add(ikepolicy_dict)
    TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))

    # 2nd IKEPolicy
    ikepolicy_dict = {
        'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c982',
        'tenant_id': '1',
        'name': 'ikepolicy_2',
        'description': 'ikepolicy description',
        'auth_algorithm': 'sha1',
        'encryption_algorithm': 'aes-256',
        'ike_version': 'v1',
        'lifetime': {
            'units': 'seconds',
            'value': 3600
        },
        'phase1_negotiation_mode': 'main',
        'pfs': 'group5'
    }
    TEST.api_ikepolicies.add(ikepolicy_dict)
    TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))

    # 1st IPSecPolicy
    ipsecpolicy_dict = {
        'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb8',
        'tenant_id': '1',
        'name': 'ipsecpolicy_1',
        'description': 'ipsecpolicy description',
        'auth_algorithm': 'sha1',
        'encapsulation_mode': 'tunnel',
        'encryption_algorithm': '3des',
        'lifetime': {
            'units': 'seconds',
            'value': 3600
        },
        'pfs': 'group5',
        'transform_protocol': 'esp'
    }
    TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
    TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))

    # 2nd IPSecPolicy
    ipsecpolicy_dict = {
        'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb9',
        'tenant_id': '1',
        'name': 'ipsecpolicy_2',
        'description': 'ipsecpolicy description',
        'auth_algorithm': 'sha1',
        'encapsulation_mode': 'tunnel',
        'encryption_algorithm': '3des',
        'lifetime': {
            'units': 'seconds',
            'value': 3600
        },
        'pfs': 'group5',
        'transform_protocol': 'esp'
    }
    TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
    TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))

    # 1st IPSecSiteConnection
    ipsecsiteconnection_dict = {
        'id': 'dd1dd3a0-f349-49be-b013-245e147763d6',
        'tenant_id': '1',
        'name': 'ipsec_connection_1',
        'description': 'vpn connection description',
        'dpd': {
            'action': 'hold',
            'interval': 30,
            'timeout': 120
        },
        'ikepolicy_id': ikepolicy_dict['id'],
        'initiator': 'bi-directional',
        'ipsecpolicy_id': ipsecpolicy_dict['id'],
        'mtu': '1500',
        'peer_address': '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
        'peer_cidrs': '20.1.0.0/24',
        'peer_id': '2607:f0d0:4545:3:200:f8ff:fe21:67cf',
        'psk': 'secret',
        'vpnservice_id': vpnservice_dict['id'],
        'admin_state_up': True,
        'status': 'Active'
    }
    TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
    TEST.ipsecsiteconnections.add(
        vpn.IPSecSiteConnection(ipsecsiteconnection_dict))

    # 2nd IPSecSiteConnection
    ipsecsiteconnection_dict = {
        'id': 'dd1dd3a0-f349-49be-b013-245e147763d7',
        'tenant_id': '1',
        'name': 'ipsec_connection_2',
        'description': 'vpn connection description',
        'dpd': {
            'action': 'hold',
            'interval': 30,
            'timeout': 120
        },
        'ikepolicy_id': ikepolicy_dict['id'],
        'initiator': 'bi-directional',
        'ipsecpolicy_id': ipsecpolicy_dict['id'],
        'mtu': '1500',
        'peer_address': '172.0.0.2',
        'peer_cidrs': '20.1.0.0/24',
        'peer_id': '172.0.0.2',
        'psk': 'secret',
        'vpnservice_id': vpnservice_dict['id'],
        'admin_state_up': True,
        'status': 'Active'
    }
    TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
    TEST.ipsecsiteconnections.add(
        vpn.IPSecSiteConnection(ipsecsiteconnection_dict))

    # FWaaS

    # 1st rule (used by 1st policy)
    rule1_dict = {
        'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
        'tenant_id': '1',
        'name': 'rule1',
        'description': 'rule1 description',
        'protocol': 'tcp',
        'action': 'allow',
        'source_ip_address': '1.2.3.0/24',
        'source_port': '80',
        'destination_ip_address': '4.5.6.7/32',
        'destination_port': '1:65535',
        'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'position': 1,
        'shared': True,
        'enabled': True
    }
    TEST.api_fw_rules.add(rule1_dict)

    rule1 = fwaas.Rule(copy.deepcopy(rule1_dict))
    # NOTE: rule1['policy'] is set below
    TEST.fw_rules.add(rule1)

    # 2nd rule (used by 2nd policy; no name)
    rule2_dict = {
        'id': 'c6298a93-850f-4f64-b78a-959fd4f1e5df',
        'tenant_id': '1',
        'name': '',
        'description': '',
        'protocol': 'udp',
        'action': 'deny',
        'source_ip_address': '1.2.3.0/24',
        'source_port': '80',
        'destination_ip_address': '4.5.6.7/32',
        'destination_port': '1:65535',
        'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'position': 2,
        'shared': True,
        'enabled': True
    }
    TEST.api_fw_rules.add(rule2_dict)

    rule2 = fwaas.Rule(copy.deepcopy(rule2_dict))
    # NOTE: rule2['policy'] is set below
    TEST.fw_rules.add(rule2)

    # 3rd rule (not used by any policy)
    rule3_dict = {
        'id': 'h0881d38-c3eb-4fee-9763-12de3338041d',
        'tenant_id': '1',
        'name': 'rule3',
        'description': 'rule3 description',
        'protocol': 'icmp',
        'action': 'allow',
        'source_ip_address': '1.2.3.0/24',
        'source_port': '80',
        'destination_ip_address': '4.5.6.7/32',
        'destination_port': '1:65535',
        'firewall_policy_id': None,
        'position': None,
        'shared': True,
        'enabled': True
    }
    TEST.api_fw_rules.add(rule3_dict)

    rule3 = fwaas.Rule(copy.deepcopy(rule3_dict))
    # rule3 is not associated with any policy
    rule3._apidict['policy'] = None
    TEST.fw_rules.add(rule3)

    # 1st policy (associated with 2 rules)
    policy1_dict = {
        'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'tenant_id': '1',
        'name': 'policy1',
        'description': 'policy with two rules',
        'firewall_rules': [rule1_dict['id'], rule2_dict['id']],
        'audited': True,
        'shared': True
    }
    TEST.api_fw_policies.add(policy1_dict)

    policy1 = fwaas.Policy(copy.deepcopy(policy1_dict))
    policy1._apidict['rules'] = [rule1, rule2]
    TEST.fw_policies.add(policy1)

    # Reverse relations (rule -> policy)
    rule1._apidict['policy'] = policy1
    rule2._apidict['policy'] = policy1

    # 2nd policy (associated with no rules; no name)
    policy2_dict = {
        'id': 'cf50b331-787a-4623-825e-da794c918d6a',
        'tenant_id': '1',
        'name': '',
        'description': '',
        'firewall_rules': [],
        'audited': False,
        'shared': False
    }
    TEST.api_fw_policies.add(policy2_dict)

    policy2 = fwaas.Policy(copy.deepcopy(policy2_dict))
    policy2._apidict['rules'] = []
    TEST.fw_policies.add(policy2)

    # 1st firewall
    fw1_dict = {
        'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
        'tenant_id': '1',
        'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'name': 'firewall1',
        'description': 'firewall description',
        'status': 'PENDING_CREATE',
        'shared': True,
        'admin_state_up': True
    }
    TEST.api_firewalls.add(fw1_dict)

    fw1 = fwaas.Firewall(copy.deepcopy(fw1_dict))
    fw1._apidict['policy'] = policy1
    TEST.firewalls.add(fw1)

    # 2nd firewall (no name)
    fw2_dict = {
        'id': '1aa75150-415f-458e-bae5-5a362a4fb1f7',
        'tenant_id': '1',
        'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
        'name': '',
        'description': '',
        'status': 'PENDING_CREATE',
        'shared': True,
        'admin_state_up': True
    }
    TEST.api_firewalls.add(fw2_dict)

    fw2 = fwaas.Firewall(copy.deepcopy(fw2_dict))
    fw2._apidict['policy'] = policy1
    TEST.firewalls.add(fw2)
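
    # A hedged sketch (not part of the original fixture) of how a test might
    # read back the relations wired up above, assuming the fwaas wrappers
    # expose their _apidict keys as attributes, as Horizon API wrappers
    # generally do:
    #
    #     policy = TEST.fw_policies.first()   # policy1
    #     policy.rules                        # -> [rule1, rule2]
    #     TEST.fw_rules.first().policy        # -> policy1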
Exemplo n.º 11
0
def data(TEST):
    TEST.images = utils.TestDataContainer()
    TEST.images_api = utils.TestDataContainer()
    TEST.snapshots = utils.TestDataContainer()
    TEST.metadata_defs = utils.TestDataContainer()
    TEST.imagesV2 = utils.TestDataContainer()
    TEST.snapshotsV2 = utils.TestDataContainer()

    # Snapshots
    snapshot_dict = {
        'name': u'snapshot',
        'container_format': u'ami',
        'id': 3,
        'status': "active",
        'owner': TEST.tenant.id,
        'properties': {
            'image_type': u'snapshot'
        },
        'is_public': False,
        'protected': False
    }
    snapshot_dict_no_owner = {
        'name': u'snapshot 2',
        'container_format': u'ami',
        'id': 4,
        'status': "active",
        'owner': None,
        'properties': {
            'image_type': u'snapshot'
        },
        'is_public': False,
        'protected': False
    }
    snapshot_dict_queued = {
        'name': u'snapshot 2',
        'container_format': u'ami',
        'id': 5,
        'status': "queued",
        'owner': TEST.tenant.id,
        'properties': {
            'image_type': u'snapshot'
        },
        'is_public': False,
        'protected': False
    }
    snapshot_dict_with_volume = {
        'name': u'snapshot 2',
        'container_format': u'ami',
        'id': 6,
        'status': "queued",
        'owner': TEST.tenant.id,
        'properties': {
            'block_device_mapping': '[{"source_type": "snapshot"}]'
        },
        'is_public': False,
        'protected': False
    }

    snapshot = images.Image(images.ImageManager(None), snapshot_dict)
    TEST.snapshots.add(api.glance.Image(snapshot))
    snapshot = images.Image(images.ImageManager(None), snapshot_dict_no_owner)
    TEST.snapshots.add(api.glance.Image(snapshot))
    snapshot = images.Image(images.ImageManager(None), snapshot_dict_queued)
    TEST.snapshots.add(api.glance.Image(snapshot))
    snapshot = images.Image(images.ImageManager(None),
                            snapshot_dict_with_volume)
    TEST.snapshots.add(api.glance.Image(snapshot))

    # Images
    image_dict = {
        'id': '007e7d55-fe1e-4c5c-bf08-44b4a4964822',
        'name': 'public_image',
        'disk_format': u'qcow2',
        'status': "active",
        'size': 20 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'container_format': 'novaImage',
        'properties': {
            'image_type': u'image'
        },
        'is_public': True,
        'protected': False,
        'min_ram': 0,
        'created_at': '2014-02-14T20:56:53'
    }
    public_image = images.Image(images.ImageManager(None), image_dict)

    image_dict = {
        'id': 'a001c047-22f8-47d0-80a1-8ec94a9524fe',
        'name': 'private_image',
        'status': "active",
        'size': 10 * 1024**2,
        'virtual_size': 20 * 1024**2,
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'container_format': 'aki',
        'is_public': False,
        'protected': False,
        'min_ram': 0,
        'created_at': '2014-03-14T12:56:53'
    }
    private_image = images.Image(images.ImageManager(None), image_dict)

    image_dict = {
        'id': 'd6936c86-7fec-474a-85c5-5e467b371c3c',
        'name': 'protected_images',
        'status': "active",
        'owner': TEST.tenant.id,
        'size': 2 * 1024**3,
        'virtual_size': None,
        'min_disk': 30,
        'container_format': 'novaImage',
        'properties': {
            'image_type': u'image'
        },
        'is_public': True,
        'protected': True,
        'min_ram': 0,
        'created_at': '2014-03-16T06:22:14'
    }
    protected_image = images.Image(images.ImageManager(None), image_dict)

    image_dict = {
        'id': '278905a6-4b52-4d1e-98f9-8c57bb25ba32',
        'name': None,
        'status': "active",
        'size': 5 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'container_format': 'novaImage',
        'properties': {
            'image_type': u'image'
        },
        'is_public': True,
        'protected': False,
        'min_ram': 0
    }
    public_image2 = images.Image(images.ImageManager(None), image_dict)

    image_dict = {
        'id': '710a1acf-a3e3-41dd-a32d-5d6b6c86ea10',
        'name': 'private_image 2',
        'status': "active",
        'size': 30 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'container_format': 'aki',
        'is_public': False,
        'protected': False,
        'min_ram': 0
    }
    private_image2 = images.Image(images.ImageManager(None), image_dict)

    image_dict = {
        'id': '7cd892fd-5652-40f3-a450-547615680132',
        'name': 'private_image 3',
        'status': "active",
        'size': 2 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'container_format': 'aki',
        'is_public': False,
        'protected': False,
        'min_ram': 0
    }
    private_image3 = images.Image(images.ImageManager(None), image_dict)

    # A shared image: not public and not owned by the local tenant.
    image_dict = {
        'id': 'c8756975-7a3b-4e43-b7f7-433576112849',
        'name': 'shared_image 1',
        'status': "active",
        'size': 8 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': 'someothertenant',
        'container_format': 'aki',
        'is_public': False,
        'protected': False,
        'min_ram': 0
    }
    shared_image1 = images.Image(images.ImageManager(None), image_dict)

    # "Official" image. Public and tenant matches an entry
    # in IMAGES_LIST_FILTER_TENANTS.
    image_dict = {
        'id': 'f448704f-0ce5-4d34-8441-11b6581c6619',
        'name': 'official_image 1',
        'status': "active",
        'size': 2 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': 'officialtenant',
        'container_format': 'aki',
        'is_public': True,
        'protected': False,
        'min_ram': 0
    }
    official_image1 = images.Image(images.ImageManager(None), image_dict)

    image_dict = {
        'id': 'a67e7d45-fe1e-4c5c-bf08-44b4a4964822',
        'name': 'multi_prop_image',
        'status': "active",
        'size': 20 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'container_format': 'novaImage',
        'properties': {
            'description': u'a multi prop image',
            'foo': u'foo val',
            'bar': u'bar val'
        },
        'is_public': True,
        'protected': False
    }
    multi_prop_image = images.Image(images.ImageManager(None), image_dict)

    # An image without a name, as the current API can return
    image_dict = {
        'id': 'c8756975-7a3b-4e43-b7f7-433576112849',
        'status': "active",
        'size': 8 * 1024**3,
        'virtual_size': None,
        'min_disk': 0,
        'owner': 'someothertenant',
        'container_format': 'aki',
        'is_public': False,
        'protected': False
    }
    no_name_image = images.Image(images.ImageManager(None), image_dict)

    TEST.images_api.add(public_image, private_image, protected_image,
                        public_image2, private_image2, private_image3,
                        shared_image1, official_image1, multi_prop_image)

    TEST.images.add(api.glance.Image(public_image),
                    api.glance.Image(private_image),
                    api.glance.Image(protected_image),
                    api.glance.Image(public_image2),
                    api.glance.Image(private_image2),
                    api.glance.Image(private_image3),
                    api.glance.Image(shared_image1),
                    api.glance.Image(official_image1),
                    api.glance.Image(multi_prop_image))

    TEST.empty_name_image = api.glance.Image(no_name_image)
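
    # A hedged usage sketch (not in the original fixture): tests typically
    # pull these fixtures back out of the containers, e.g.
    #
    #     image = TEST.images.first()   # the wrapped public_image
    #     TEST.images.list()            # all wrapped images added above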

    image_v2_dicts = [{
        'checksum': 'eb9139e4942121f22bbc2afc0400b2a4',
        'container_format': 'novaImage',
        'created_at': '2014-02-14T20:56:53',
        'direct_url': 'swift+config://ref1/glance/'
                      'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5',
        'disk_format': u'qcow2',
        'file': '/v2/images/da8500d5-8b80-4b9c-8410-cc57fb8fb9d5/file',
        'id': '007e7d55-fe1e-4c5c-bf08-44b4a4964822',
        'kernel_id': 'f6ebd5f0-b110-4406-8c1e-67b28d4e85e7',
        'locations': [{
            'metadata': {},
            'url': 'swift+config://ref1/glance/'
                   'da8500d5-8b80-4b9c-8410-cc57fb8fb9d5'
        }],
        'min_ram': 0,
        'name': 'public_image',
        'image_type': u'image',
        'min_disk': 0,
        'owner': TEST.tenant.id,
        'protected': False,
        'ramdisk_id': '868efefc-4f2d-4ed8-82b1-7e35576a7a47',
        'size': 20 * 1024**3,
        'status': 'active',
        'tags': ['active_image'],
        'updated_at': '2015-08-31T19:37:45Z',
        'virtual_size': None,
        'visibility': 'public'
    }, {
        'checksum': None,
        'container_format': 'novaImage',
        'created_at': '2014-03-16T06:22:14',
        'disk_format': None,
        'image_type': u'image',
        'file': '/v2/images/885d1cb0-9f5c-4677-9d03-175be7f9f984/file',
        'id': 'd6936c86-7fec-474a-85c5-5e467b371c3c',
        'locations': [],
        'min_disk': 30,
        'min_ram': 0,
        'name': 'protected_images',
        'owner': TEST.tenant.id,
        'protected': True,
        'size': 2 * 1024**3,
        'status': "active",
        'tags': ['empty_image'],
        'updated_at': '2015-09-01T22:37:32Z',
        'virtual_size': None,
        'visibility': 'public'
    }, {
        'checksum': 'e533283e6aac072533d1d091a7d2e413',
        'container_format': 'novaImage',
        'created_at': '2015-09-02T00:31:16Z',
        'disk_format': 'qcow2',
        'file': '/v2/images/10ca6b6b-48f4-43ac-8159-aa9e9353f5e4/file',
        'id': 'a67e7d45-fe1e-4c5c-bf08-44b4a4964822',
        'image_type': 'an image type',
        'min_disk': 0,
        'min_ram': 0,
        'name': 'multi_prop_image',
        'owner': TEST.tenant.id,
        'protected': False,
        'size': 20 * 1024**3,
        'status': 'active',
        'tags': ['custom_property_image'],
        'updated_at': '2015-09-02T00:31:17Z',
        'virtual_size': None,
        'visibility': 'public',
        'description': u'a multi prop image',
        'foo': u'foo val',
        'bar': u'bar val'
    }]
    for fixture in image_v2_dicts:
        apiresource = APIResourceV2(fixture)
        TEST.imagesV2.add(api.glance.Image(apiresource))

    snapshot_v2_dict = {
        'checksum': None,
        'container_format': 'novaImage',
        'created_at': '2018-02-26T22:50:56Z',
        'disk_format': None,
        'block_device_mapping': '[{"source_type": "snapshot"}]',
        'file': '/v2/images/c701226a-aa32-4064-bd36-e85a3dcc61aa/file',
        'id': 'c701226a-aa32-4064-bd36-e85a3dcc61aa',
        'locations': [],
        'min_disk': 30,
        'min_ram': 0,
        'name': 'snapshot_with_volume',
        'owner': TEST.tenant.id,
        'protected': True,
        'size': 2 * 1024**3,
        'status': "active",
        'tags': ['empty_image'],
        'updated_at': '2018-02-26T22:50:56Z',
        'virtual_size': None,
        'visibility': 'public'
    }
    TEST.snapshotsV2.add(api.glance.Image(APIResourceV2(snapshot_v2_dict)))

    metadef_dict = {
        'namespace': 'namespace_1',
        'display_name': 'Namespace 1',
        'description': 'Mock desc 1',
        'resource_type_associations': [{
            'created_at': '2014-08-21T08:39:43Z',
            'prefix': 'mock',
            'name': 'mock name'
        }],
        'visibility': 'public',
        'protected': True,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'cpu_mock:mock': {
                'default': '1',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)

    metadef_dict = {
        'namespace': 'namespace_2',
        'display_name': 'Namespace 2',
        'description': 'Mock desc 2',
        'resource_type_associations': [{
            'created_at': '2014-08-21T08:39:43Z',
            'prefix': 'mock',
            'name': 'mock name'
        }],
        'visibility': 'private',
        'protected': False,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'hdd_mock:mock': {
                'default': '2',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)

    metadef_dict = {
        'namespace': 'namespace_3',
        'display_name': 'Namespace 3',
        'description': 'Mock desc 3',
        'resource_type_associations': [{
            'created_at': '2014-08-21T08:39:43Z',
            'prefix': 'mock',
            'name': 'mock name'
        }],
        'visibility': 'public',
        'protected': False,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'gpu_mock:mock': {
                'default': '2',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)

    metadef_dict = {
        'namespace': 'namespace_4',
        'display_name': 'Namespace 4',
        'description': 'Mock desc 4',
        'resource_type_associations': [{
            'created_at': '2014-08-21T08:39:43Z',
            'prefix': 'mock',
            'name': 'OS::Cinder::Volume',
            'properties_target': 'user'
        }],
        'visibility': 'public',
        'protected': True,
        'created_at': '2014-08-21T08:39:43Z',
        'properties': {
            'ram_mock:mock': {
                'default': '2',
                'type': 'integer',
                'description': 'Number of mocks.',
                'title': 'mocks'
            }
        }
    }
    metadef = Namespace(metadef_dict)
    TEST.metadata_defs.add(metadef)
Exemplo n.º 12
0
def data(TEST):
    cluster1 = clusters.Cluster(clusters.Clusters(None),
                                CLUSTER_DATA_ONE)
    cluster2 = clusters.Cluster(clusters.Clusters(None),
                                CLUSTER_DATA_TWO)
    database1 = instances.Instance(instances.Instances(None),
                                   DATABASE_DATA_ONE)
    database2 = instances.Instance(instances.Instances(None),
                                   DATABASE_DATA_TWO)
    database3 = instances.Instance(instances.Instances(None),
                                   DATABASE_DATA_THREE)
    bkup1 = backups.Backup(backups.Backups(None), BACKUP_ONE)
    bkup2 = backups.Backup(backups.Backups(None), BACKUP_TWO)
    bkup3 = backups.Backup(backups.Backups(None), BACKUP_TWO_INC)

    cfg1 = configurations.Configuration(configurations.Configurations(None),
                                        CONFIG_ONE)
    cfg2 = configurations.Configuration(configurations.Configurations(None),
                                        CONFIG_TWO)

    user1 = users.User(users.Users(None), USER_ONE)
    user_db1 = databases.Database(databases.Databases(None),
                                  USER_DB_ONE)
    user_root1 = databases.Database(databases.Databases(None),
                                    USER_ROOT_ONE)

    datastore1 = datastores.Datastore(datastores.Datastores(None),
                                      DATASTORE_ONE)
    version1 = datastores.\
        DatastoreVersion(datastores.DatastoreVersions(None),
                         VERSION_ONE)
    version2 = datastores.\
        DatastoreVersion(datastores.DatastoreVersions(None),
                         VERSION_TWO)

    flavor1 = flavors.Flavor(flavors.FlavorManager(None), FLAVOR_ONE)
    flavor2 = flavors.Flavor(flavors.FlavorManager(None), FLAVOR_TWO)
    flavor3 = flavors.Flavor(flavors.FlavorManager(None), FLAVOR_THREE)
    datastore_mongodb = datastores.Datastore(datastores.Datastores(None),
                                             DATASTORE_MONGODB)
    version_mongodb_2_6 = datastores.\
        DatastoreVersion(datastores.DatastoreVersions(None),
                         VERSION_MONGODB_2_6)
    datastore_redis = datastores.Datastore(datastores.Datastores(None),
                                           DATASTORE_REDIS)
    version_redis_3_0 = datastores.\
        DatastoreVersion(datastores.DatastoreVersions(None),
                         VERSION_REDIS_3_0)
    datastore_vertica = datastores.Datastore(datastores.Datastores(None),
                                             DATASTORE_VERTICA)
    version_vertica_7_1 = datastores.\
        DatastoreVersion(datastores.DatastoreVersions(None),
                         VERSION_VERTICA_7_1)

    log1 = instances.DatastoreLog(instances.Instances(None), LOG_1)
    log2 = instances.DatastoreLog(instances.Instances(None), LOG_2)
    log3 = instances.DatastoreLog(instances.Instances(None), LOG_3)
    log4 = instances.DatastoreLog(instances.Instances(None), LOG_4)

    TEST.trove_clusters = utils.TestDataContainer()
    TEST.trove_clusters.add(cluster1)
    TEST.trove_clusters.add(cluster2)
    TEST.databases = utils.TestDataContainer()
    TEST.database_backups = utils.TestDataContainer()
    TEST.database_configurations = utils.TestDataContainer()
    TEST.database_users = utils.TestDataContainer()
    TEST.database_user_dbs = utils.TestDataContainer()
    TEST.database_user_roots = utils.TestDataContainer()
    TEST.database_flavors = utils.TestDataContainer()

    TEST.databases.add(database1)
    TEST.databases.add(database2)
    TEST.databases.add(database3)
    TEST.database_backups.add(bkup1)
    TEST.database_backups.add(bkup2)
    TEST.database_backups.add(bkup3)

    TEST.database_configurations.add(cfg1)
    TEST.database_configurations.add(cfg2)

    TEST.configuration_parameters = utils.TestDataContainer()
    for parameter in CONFIG_PARAMS_ONE:
        TEST.configuration_parameters.add(
            configurations.ConfigurationParameter(
                configurations.ConfigurationParameters(None), parameter))

    TEST.configuration_instances = utils.TestDataContainer()
    TEST.configuration_instances.add(
        configurations.Configuration(
            configurations.Configurations(None), CONFIG_INSTANCE_ONE))

    TEST.database_users.add(user1)
    TEST.database_user_dbs.add(user_db1)
    TEST.database_user_roots.add(user_root1)
    TEST.datastores = utils.TestDataContainer()
    TEST.datastores.add(datastore_mongodb)
    TEST.datastores.add(datastore_redis)
    TEST.datastores.add(datastore_vertica)
    TEST.datastores.add(datastore1)
    TEST.database_flavors.add(flavor1, flavor2, flavor3)
    TEST.datastore_versions = utils.TestDataContainer()
    TEST.datastore_versions.add(version_vertica_7_1)
    TEST.datastore_versions.add(version_redis_3_0)
    TEST.datastore_versions.add(version_mongodb_2_6)
    TEST.datastore_versions.add(version1)
    TEST.datastore_versions.add(version2)

    TEST.logs = utils.TestDataContainer()
    TEST.logs.add(log1, log2, log3, log4)
Exemplo n.º 13
0
def data(TEST):
    TEST.containers = utils.TestDataContainer()
    TEST.objects = utils.TestDataContainer()
    TEST.folder = utils.TestDataContainer()

    # '%' can break URL if not properly url-quoted
    # ' ' (space) can break 'Content-Disposition' if not properly
    # double-quoted
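    #
    # A minimal, hedged illustration (not in the original fixture): a name
    # like u"container one%\u6346" must be url-quoted before it can appear
    # in a URL, which is what the public_url of container_2 below relies on:
    #
    #     utils_http.urlquote(u"container one%\u6346")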

    container_dict_1 = {
        "name": u"container one%\u6346",
        "container_object_count": 2,
        "container_bytes_used": 256,
        "timestamp": timeutils.isotime(),
        "is_public": False,
        "public_url": ""
    }
    container_1 = swift.Container(container_dict_1)
    container_2_name = u"container_two\u6346"
    container_dict_2 = {
        "name": container_2_name,
        "container_object_count": 4,
        "container_bytes_used": 1024,
        "timestamp": timeutils.isotime(),
        "is_public": True,
        "public_url": "http://public.swift.example.com:8080/" +
                      "v1/project_id/%s" % utils_http.urlquote(container_2_name)
    }
    container_2 = swift.Container(container_dict_2)
    container_dict_3 = {
        "name": u"container,three%\u6346",
        "container_object_count": 2,
        "container_bytes_used": 256,
        "timestamp": timeutils.isotime(),
        "is_public": False,
        "public_url": ""
    }
    container_3 = swift.Container(container_dict_3)
    TEST.containers.add(container_1, container_2, container_3)

    object_dict = {
        "name": u"test object%\u6346",
        "content_type": u"text/plain",
        "bytes": 128,
        "timestamp": timeutils.isotime(),
        "last_modified": None,
        "hash": u"object_hash"
    }
    object_dict_2 = {
        "name": u"test_object_two\u6346",
        "content_type": u"text/plain",
        "bytes": 128,
        "timestamp": timeutils.isotime(),
        "last_modified": None,
        "hash": u"object_hash_2"
    }
    object_dict_3 = {
        "name": u"test,object_three%\u6346",
        "content_type": u"text/plain",
        "bytes": 128,
        "timestamp": timeutils.isotime(),
        "last_modified": None,
        "hash": u"object_hash"
    }
    object_dict_4 = {
        "name": u"test.txt",
        "content_type": u"text/plain",
        "bytes": 128,
        "timestamp": timeutils.isotime(),
        "last_modified": None,
        "hash": u"object_hash"
    }
    obj_dicts = [object_dict, object_dict_2, object_dict_3, object_dict_4]
    obj_data = "Fake Data"

    for obj_dict in obj_dicts:
        swift_object = swift.StorageObject(obj_dict,
                                           container_1.name,
                                           data=obj_data)
        TEST.objects.add(swift_object)

    folder_dict = {
        "name": u"test folder%\u6346",
        "content_type": u"text/plain",
        "bytes": 128,
        "timestamp": timeutils.isotime(),
        "_table_data_type": u"subfolders",
        "last_modified": None,
        "hash": u"object_hash"
    }

    TEST.folder.add(swift.PseudoFolder(folder_dict, container_1.name))
Exemplo n.º 14
0
def data(TEST):
    TEST.stacks = utils.TestDataContainer()
    TEST.stack_templates = utils.TestDataContainer()
    TEST.stack_environments = utils.TestDataContainer()
    TEST.resource_types = utils.TestDataContainer()
    TEST.heat_services = utils.TestDataContainer()

    # Services
    service_1 = services.Service(
        services.ServiceManager(None), {
            "status": "up",
            "binary": "heat-engine",
            "report_interval": 60,
            "engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d1",
            "created_at": "2015-02-06T03:23:32.000000",
            "hostname": "mrkanag",
            "updated_at": "2015-02-20T09:49:52.000000",
            "topic": "engine",
            "host": "engine-1",
            "deleted_at": None,
            "id": "1efd7015-5016-4caa-b5c8-12438af7b100"
        })

    service_2 = services.Service(
        services.ServiceManager(None), {
            "status": "up",
            "binary": "heat-engine",
            "report_interval": 60,
            "engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d2",
            "created_at": "2015-02-06T03:23:32.000000",
            "hostname": "mrkanag",
            "updated_at": "2015-02-20T09:49:52.000000",
            "topic": "engine",
            "host": "engine-2",
            "deleted_at": None,
            "id": "1efd7015-5016-4caa-b5c8-12438af7b100"
        })

    TEST.heat_services.add(service_1)
    TEST.heat_services.add(service_2)

    # Data returned by heatclient.
    TEST.api_resource_types = utils.TestDataContainer()

    for i in range(10):
        stack_data = {
            "description": "No description",
            "links": [{
                "href": "http://192.168.1.70:8004/v1/"
                        "051c727ee67040d6a7b7812708485a97/"
                        "stacks/stack-1211-38/"
                        "05b4f39f-ea96-4d91-910c-e758c078a089",
                "rel": "self"
            }],
            "parameters": {
                'DBUsername': '******',
                'InstanceType': 'm1.small',
                'AWS::StackId':
                    'arn:openstack:heat::2ce287:stacks/teststack/88553ec',
                'DBRootPassword': '******',
                'AWS::StackName': "teststack{0}".format(i),
                'DBPassword': '******',
                'AWS::Region': 'ap-southeast-1',
                'DBName': u'wordpress'
            },
            "stack_status_reason": "Stack successfully created",
            "stack_name": "stack-test{0}".format(i),
            "creation_time": "2013-04-22T00:11:39Z",
            "updated_time": "2013-04-22T00:11:39Z",
            "stack_status": "CREATE_COMPLETE",
            "id": "05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i)
        }
        stack = stacks.Stack(stacks.StackManager(None), stack_data)
        TEST.stacks.add(stack)

    TEST.stack_templates.add(Template(TEMPLATE, VALIDATE))
    TEST.stack_environments.add(Environment(ENVIRONMENT))

    # Resource types list
    r_type_1 = {
        "resource_type": "AWS::CloudFormation::Stack",
        "attributes": {},
        "properties": {
            "Parameters": {
                "description":
                "The set of parameters passed to this nested stack.",
                "immutable": False,
                "required": False,
                "type": "map",
                "update_allowed": True
            },
            "TemplateURL": {
                "description": "The URL of a template that specifies"
                " the stack to be created as a resource.",
                "immutable": False,
                "required": True,
                "type": "string",
                "update_allowed": True
            },
            "TimeoutInMinutes": {
                "description": "The length of time, in minutes,"
                " to wait for the nested stack creation.",
                "immutable": False,
                "required": False,
                "type": "number",
                "update_allowed": True
            }
        }
    }

    r_type_2 = {
        "resource_type": "OS::Heat::CloudConfig",
        "attributes": {
            "config": {
                "description": "The config value of the software config."
            }
        },
        "properties": {
            "cloud_config": {
                "description": "Map representing the cloud-config data"
                " structure which will be formatted as YAML.",
                "immutable": False,
                "required": False,
                "type": "map",
                "update_allowed": False
            }
        }
    }

    r_types_list = [r_type_1, r_type_2]

    for rt in r_types_list:
        r_type = resource_types.ResourceType(
            resource_types.ResourceTypeManager(None), rt['resource_type'])
        TEST.resource_types.add(r_type)
        TEST.api_resource_types.add(rt)
Exemplo n.º 15
0
def data(TEST):
    # MistralActions
    TEST.mistralclient_actions = test_data_utils.TestDataContainer()
    action_1 = actions.Action(
        actions.ActionManager(None), {
            'name': 'a',
            'is_system': True,
            'input': 'param1',
            'description': 'my cool action',
            'tags': ['test'],
            'created_at': '1',
            'updated_at': '1'
        })
    TEST.mistralclient_actions.add(action_1)

    # MistralExecutions
    TEST.mistralclient_executions = test_data_utils.TestDataContainer()
    execution_1 = executions.Execution(
        executions.ExecutionManager(None), {
            'id': '123',
            'workflow_name': 'my_wf',
            'description': '',
            'state': 'RUNNING',
            'input': {
                'person': {
                    'first_name': 'John',
                    'last_name': 'Doe'
                }
            }
        })
    TEST.mistralclient_executions.add(execution_1)

    # Tasks
    TEST.mistralclient_tasks = test_data_utils.TestDataContainer()
    task_1 = tasks.Task(
        tasks.TaskManager(None), {
            'id': '1',
            'workflow_execution_id': '123',
            'name': 'my_task',
            'workflow_name': 'my_wf',
            'state': 'RUNNING',
            'tags': ['deployment', 'demo'],
            'result': {
                'some': 'result'
            }
        })
    TEST.mistralclient_tasks.add(task_1)

    # Workbooks
    TEST.mistralclient_workbooks = test_data_utils.TestDataContainer()
    workbook_1 = workbooks.Workbook(
        workbooks.WorkbookManager(None), {
            'name': 'a',
            'tags': ['a', 'b'],
            'created_at': '1',
            'updated_at': '1',
            'definition': WB_DEF
        })
    TEST.mistralclient_workbooks.add(workbook_1)

    # Workflows
    TEST.mistralclient_workflows = test_data_utils.TestDataContainer()
    workflow_1 = workflows.Workflow(
        workflows.WorkflowManager(None), {
            'name': 'a',
            'tags': ['a', 'b'],
            'input': 'param',
            'created_at': '1',
            'updated_at': '1',
            'definition': WF_DEF
        })
    TEST.mistralclient_workflows.add(workflow_1)

    # MistralActionsExecutions
    TEST.mistralclient_action_executions = test_data_utils.TestDataContainer()
    action_executions_1 = action_executions.ActionExecution(
        action_executions.ActionExecutionManager(None), {
            'id': '1',
            'name': 'a',
            'tags': ['a', 'b'],
            'workflow_name': 'my work flow',
            'task_execution_id': '1',
            'task_name': 'b',
            'description': '',
            'created_at': '1',
            'updated_at': '1',
            'accepted': True,
            'state': 'RUNNING'
        })
    TEST.mistralclient_action_executions.add(action_executions_1)

    # MistralCronTriggers
    TEST.mistralclient_cron_triggers = test_data_utils.TestDataContainer()
    cron_triggers_1 = cron_triggers.CronTrigger(
        cron_triggers.CronTriggerManager(None), {
            'id': '1',
            'name': 'a',
            'workflow_name': 'my work flow',
            'pattern': '',
            'next_execution_time': '',
            'remaining_executions': '',
            'first_execution_time': '',
            'created_at': '1',
            'updated_at': '1'
        })
    TEST.mistralclient_cron_triggers.add(cron_triggers_1)
Exemplo n.º 16
0
def data(TEST):
    TEST.cinder_services = utils.TestDataContainer()
    TEST.cinder_volumes = utils.TestDataContainer()
    TEST.cinder_volume_backups = utils.TestDataContainer()
    TEST.cinder_volume_types = utils.TestDataContainer()
    TEST.cinder_volume_snapshots = utils.TestDataContainer()
    TEST.cinder_quotas = utils.TestDataContainer()
    TEST.cinder_quota_usages = utils.TestDataContainer()
    TEST.cinder_availability_zones = utils.TestDataContainer()

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "service": "cinder-scheduler",
        "status": "enabled",
        "binary": "cinder-scheduler",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None
    })

    service_2 = services.Service(services.ServiceManager(None), {
        "service": "cinder-volume",
        "status": "enabled",
        "binary": "cinder-volume",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None
    })
    TEST.cinder_services.add(service_1)
    TEST.cinder_services.add(service_2)

    # Volumes - Cinder v1
    volume = volumes.Volume(volumes.VolumeManager(None),
                            {'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
                             'status': 'available',
                             'size': 40,
                             'display_name': 'Volume name',
                             'display_description': 'Volume description',
                             'created_at': '2014-01-27 10:30:00',
                             'volume_type': None,
                             'attachments': []})
    nameless_volume = volumes.Volume(volumes.VolumeManager(None),
                         dict(id="4b069dd0-6eaa-4272-8abc-5448a68f1cce",
                              status='available',
                              size=10,
                              display_name='',
                              display_description='',
                              device="/dev/hda",
                              created_at='2010-11-21 18:34:25',
                              volume_type='vol_type_1',
                              attachments=[]))
    other_volume = volumes.Volume(volumes.VolumeManager(None),
                            {'id': "21023e92-8008-1234-8059-7f2293ff3889",
                             'status': 'in-use',
                             'size': 10,
                             'display_name': u'my_volume',
                             'display_description': '',
                             'created_at': '2013-04-01 10:30:00',
                             'volume_type': None,
                             'attachments': [{"id": "1", "server_id": '1',
                                            "device": "/dev/hda"}]})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    other_volume.bootable = 'true'

    TEST.cinder_volumes.add(api.cinder.Volume(volume))
    TEST.cinder_volumes.add(api.cinder.Volume(nameless_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(other_volume))

    vol_type1 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': u'1',
                                         'name': u'vol_type_1',
                                         'extra_specs': {'foo': 'bar'}})
    vol_type2 = volume_types.VolumeType(volume_types.VolumeTypeManager(None),
                                        {'id': u'2',
                                         'name': u'vol_type_2'})
    TEST.cinder_volume_types.add(vol_type1, vol_type2)

    # Volumes - Cinder v2
    volume_v2 = volumes_v2.Volume(volumes_v2.VolumeManager(None),
                            {'id': "31023e92-8008-4c8b-8059-7f2293ff1234",
                             'name': 'v2_volume',
                             'description': "v2 Volume Description",
                             'status': 'available',
                             'size': 20,
                             'created_at': '2014-01-27 10:30:00',
                             'volume_type': None,
                             'bootable': 'true',
                             'attachments': []})
    volume_v2.bootable = 'true'

    TEST.cinder_volumes.add(api.cinder.Volume(volume_v2))

    snapshot = vol_snaps.Snapshot(vol_snaps.SnapshotManager(None),
                        {'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
                         'display_name': 'test snapshot',
                         'display_description': 'volume snapshot',
                         'size': 40,
                         'status': 'available',
                         'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
    snapshot2 = vol_snaps_v2.Snapshot(vol_snaps_v2.SnapshotManager(None),
                        {'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0f',
                         'name': '',
                         'description': 'v2 volume snapshot description',
                         'size': 80,
                         'status': 'available',
                         'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})

    snapshot.bootable = 'true'
    snapshot2.bootable = 'true'

    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot2))

    volume_backup1 = vol_backups.VolumeBackup(vol_backups.
                                              VolumeBackupManager(None),
                     {'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
                     'name': 'backup1',
                     'description': 'volume backup 1',
                     'size': 10,
                     'status': 'available',
                     'container_name': 'volumebackups',
                     'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})

    volume_backup2 = vol_backups.VolumeBackup(vol_backups.
                                              VolumeBackupManager(None),
                     {'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e52',
                     'name': 'backup2',
                     'description': 'volume backup 2',
                     'size': 20,
                     'status': 'available',
                     'container_name': 'volumebackups',
                     'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})

    TEST.cinder_volume_backups.add(volume_backup1)
    TEST.cinder_volume_backups.add(volume_backup2)

    # Quota Sets
    quota_data = dict(volumes='1',
                      snapshots='1',
                      gigabytes='1000')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.cinder_quotas.add(api.base.QuotaSet(quota))

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'snapshots': {'used': 0,
                                      'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(api.base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.cinder_quota_usages.add(quota_usage)

    # Availability Zones
    # Cinder returns the following structure from os-availability-zone
    # {"availabilityZoneInfo":
    # [{"zoneState": {"available": true}, "zoneName": "nova"}]}
    # Note that the default zone is still "nova" even though this is cinder
    TEST.cinder_availability_zones.add(
        availability_zones.AvailabilityZone(
            availability_zones.AvailabilityZoneManager(None),
            {
                'zoneName': 'nova',
                'zoneState': {'available': True}
            }
        )
    )
    # Cinder Limits
    limits = {"absolute": {"totalVolumesUsed": 1,
                           "totalGigabytesUsed": 5,
                           "maxTotalVolumeGigabytes": 1000,
                           "maxTotalVolumes": 10}}
    TEST.cinder_limits = limits
Exemplo n.º 17
0
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import logging

from django.utils.translation import ugettext_lazy as _
from horizon.utils import memoized
from openstack_dashboard.test.test_data import utils as test_utils

from tuskar_ui.cached_property import cached_property  # noqa
from tuskar_ui.handle_errors import handle_errors  # noqa
from tuskar_ui.test.test_data import flavor_data
from tuskar_ui.test.test_data import heat_data

TEST_DATA = test_utils.TestDataContainer()
flavor_data.data(TEST_DATA)
heat_data.data(TEST_DATA)

LOG = logging.getLogger(__name__)


class Flavor(object):
    def __init__(self, flavor):
        """Construct by wrapping Nova flavor

        :param flavor: Nova flavor
        :type  flavor: novaclient.v1_1.flavors.Flavor
        """
        self._flavor = flavor
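
    # A hedged usage sketch (not in the original source): the wrapper is
    # constructed straight from a novaclient flavor and delegates to it, e.g.
    #
    #     nova_flavor = ...              # a novaclient.v1_1.flavors.Flavor
    #     wrapped = Flavor(nova_flavor)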
Exemplo n.º 18
0
def data(TEST):
    # Make a deep copy of the catalog to avoid persisting side-effects
    # when tests modify the catalog.
    TEST.service_catalog = copy.deepcopy(SERVICE_CATALOG)
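    # (Hedged note, not in the original source: because of the deepcopy above,
    # a test that mutates TEST.service_catalog leaves the module-level
    # SERVICE_CATALOG untouched for other tests.)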
    TEST.tokens = utils.TestDataContainer()
    TEST.domains = utils.TestDataContainer()
    TEST.users = utils.TestDataContainer()
    TEST.groups = utils.TestDataContainer()
    TEST.tenants = utils.TestDataContainer()
    TEST.roles = utils.TestDataContainer()
    TEST.ec2 = utils.TestDataContainer()

    admin_role_dict = {'id': '1', 'name': 'admin'}
    admin_role = roles.Role(roles.RoleManager, admin_role_dict)
    member_role_dict = {
        'id': "2",
        'name': settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE
    }
    member_role = roles.Role(roles.RoleManager, member_role_dict)
    TEST.roles.add(admin_role, member_role)
    TEST.roles.admin = admin_role
    TEST.roles.member = member_role

    domain_dict = {
        'id': "1",
        'name': 'test_domain',
        'description': "a test domain.",
        'enabled': True
    }
    domain_dict_2 = {
        'id': "2",
        'name': 'disabled_domain',
        'description': "a disabled test domain.",
        'enabled': False
    }
    domain = domains.Domain(domains.DomainManager, domain_dict)
    disabled_domain = domains.Domain(domains.DomainManager, domain_dict_2)
    TEST.domains.add(domain, disabled_domain)
    TEST.domain = domain  # Your "current" domain

    user_dict = {
        'id': "1",
        'name': 'test_user',
        'email': '*****@*****.**',
        'password': '******',
        'token': 'test_token',
        'project_id': '1',
        'enabled': True,
        'domain_id': "1"
    }
    user = users.User(users.UserManager(None), user_dict)
    user_dict = {
        'id': "2",
        'name': 'user_two',
        'email': '*****@*****.**',
        'password': '******',
        'token': 'test_token',
        'project_id': '1',
        'enabled': True,
        'domain_id': "1"
    }
    user2 = users.User(users.UserManager(None), user_dict)
    user_dict = {
        'id': "3",
        'name': 'user_three',
        'email': '*****@*****.**',
        'password': '******',
        'token': 'test_token',
        'project_id': '1',
        'enabled': True,
        'domain_id': "1"
    }
    user3 = users.User(users.UserManager(None), user_dict)
    user_dict = {
        'id': "4",
        'name': 'user_four',
        'email': '*****@*****.**',
        'password': '******',
        'token': 'test_token',
        'project_id': '2',
        'enabled': True,
        'domain_id': "2"
    }
    user4 = users.User(users.UserManager(None), user_dict)
    user_dict = {
        'id': "5",
        'name': 'user_five',
        'email': None,
        'password': '******',
        'token': 'test_token',
        'project_id': '2',
        'enabled': True,
        'domain_id': "1"
    }
    user5 = users.User(users.UserManager(None), user_dict)
    TEST.users.add(user, user2, user3, user4, user5)
    TEST.user = user  # Your "current" user
    TEST.user.service_catalog = copy.deepcopy(SERVICE_CATALOG)

    group_dict = {
        'id': "1",
        'name': 'group_one',
        'description': 'group one description',
        'project_id': '1',
        'domain_id': '1'
    }
    group = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {
        'id': "2",
        'name': 'group_two',
        'description': 'group two description',
        'project_id': '1',
        'domain_id': '1'
    }
    group2 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {
        'id': "3",
        'name': 'group_three',
        'description': 'group three description',
        'project_id': '1',
        'domain_id': '1'
    }
    group3 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {
        'id': "4",
        'name': 'group_four',
        'description': 'group four description',
        'project_id': '2',
        'domain_id': '2'
    }
    group4 = groups.Group(groups.GroupManager(None), group_dict)
    TEST.groups.add(group, group2, group3, group4)

    tenant_dict = {
        'id': "1",
        'name': 'test_tenant',
        'description': "a test tenant.",
        'enabled': True,
        'domain_id': '1',
        'domain_name': 'test_domain'
    }
    tenant_dict_2 = {
        'id': "2",
        'name': 'disabled_tenant',
        'description': "a disabled test tenant.",
        'enabled': False,
        'domain_id': '2',
        'domain_name': 'disabled_domain'
    }
    tenant_dict_3 = {
        'id': "3",
        'name': u'\u4e91\u89c4\u5219',
        'description': "an unicode-named tenant.",
        'enabled': True,
        'domain_id': '2',
        'domain_name': 'disabled_domain'
    }
    tenant = tenants.Tenant(tenants.TenantManager, tenant_dict)
    disabled_tenant = tenants.Tenant(tenants.TenantManager, tenant_dict_2)
    tenant_unicode = tenants.Tenant(tenants.TenantManager, tenant_dict_3)

    TEST.tenants.add(tenant, disabled_tenant, tenant_unicode)
    TEST.tenant = tenant  # Your "current" tenant

    tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
    expiration = tomorrow.isoformat()

    scoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration,
                'tenant': tenant_dict,
                'tenants': [tenant_dict]
            },
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]
            },
            'serviceCatalog': TEST.service_catalog
        }
    }

    scoped_access_info = access.AccessInfo.factory(resp=None,
                                                   body=scoped_token_dict)

    unscoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration
            },
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]
            },
            'serviceCatalog': TEST.service_catalog
        }
    }
    unscoped_access_info = access.AccessInfo.factory(resp=None,
                                                     body=unscoped_token_dict)

    scoped_token = auth_user.Token(scoped_access_info)
    unscoped_token = auth_user.Token(unscoped_access_info)
    TEST.tokens.add(scoped_token, unscoped_token)
    TEST.token = scoped_token  # your "current" token.
    TEST.tokens.scoped_token = scoped_token
    TEST.tokens.unscoped_token = unscoped_token

    access_secret = ec2.EC2(ec2.CredentialsManager, {
        "access": "access",
        "secret": "secret"
    })
    TEST.ec2.add(access_secret)
Exemplo n.º 19
0
def data(TEST):
    TEST.plugins = utils.TestDataContainer()
    TEST.plugins_configs = utils.TestDataContainer()
    TEST.nodegroup_templates = utils.TestDataContainer()
    TEST.cluster_templates = utils.TestDataContainer()
    TEST.clusters = utils.TestDataContainer()
    TEST.data_sources = utils.TestDataContainer()
    TEST.job_binaries = utils.TestDataContainer()
    TEST.jobs = utils.TestDataContainer()
    TEST.job_executions = utils.TestDataContainer()
    TEST.registered_images = copy.copy(TEST.images)

    plugin1_dict = {
        "description": "vanilla plugin",
        "name": "vanilla",
        "title": "Vanilla Apache Hadoop",
        "versions": ["2.3.0", "1.2.1"]
    }

    plugin1 = plugins.Plugin(plugins.PluginManager(None), plugin1_dict)

    TEST.plugins.add(plugin1)

    plugin_config1_dict = {
        "node_processes": {
            "HDFS": ["namenode", "datanode", "secondarynamenode"],
            "MapReduce": ["tasktracker", "jobtracker"]
        },
        "description": "This plugin provides an ability to launch vanilla "
                       "Apache Hadoop cluster without any management "
                       "consoles.",
        "versions": ["1.2.1"],
        "required_image_tags": ["vanilla", "1.2.1"],
        "configs": [
            {
                "default_value": "/tmp/hadoop-${user.name}",
                "name": "hadoop.tmp.dir",
                "priority": 2,
                "config_type": "string",
                "applicable_target": "HDFS",
                "is_optional": True,
                "scope": "node",
                "description": "A base for other temporary directories."
            },
            {
                "default_value": True,
                "name": "hadoop.native.lib",
                "priority": 2,
                "config_type": "bool",
                "applicable_target": "HDFS",
                "is_optional": True,
                "scope": "node",
                "description": "Should native hadoop libraries, if present, "
                               "be used."
            },
        ],
        "title": "Vanilla Apache Hadoop",
        "name": "vanilla"
    }

    TEST.plugins_configs.add(
        plugins.Plugin(plugins.PluginManager(None), plugin_config1_dict))

    # Nodegroup_Templates.
    ngt1_dict = {
        "created_at": "2014-06-04 14:01:03.701243",
        "description": None,
        "flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
        "availability_zone": None,
        "floating_ip_pool": None,
        "auto_security_group": True,
        "security_groups": [],
        "hadoop_version": "1.2.1",
        "id": "c166dfcc-9cc7-4b48-adc9-f0946169bb36",
        "image_id": None,
        "name": "sample-template",
        "node_configs": {},
        "node_processes":
            ["namenode", "jobtracker", "secondarynamenode", "hiveserver",
             "oozie"],
        "plugin_name": "vanilla",
        "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
        "updated_at": None,
        "volume_mount_prefix": "/volumes/disk",
        "volumes_per_node": 0,
        "volumes_size": 0,
        "volumes_availability_zone": None,
    }

    ngt1 = node_group_templates.NodeGroupTemplate(
        node_group_templates.NodeGroupTemplateManager(None), ngt1_dict)

    TEST.nodegroup_templates.add(ngt1)

    # Cluster_templates.
    ct1_dict = {
        "anti_affinity": [],
        "cluster_configs": {},
        "created_at":
        "2014-06-04 14:01:06.460711",
        "default_image_id":
        None,
        "description":
        None,
        "hadoop_version":
        "1.2.1",
        "id":
        "a2c3743f-31a2-4919-8d02-792138a87a98",
        "name":
        "sample-cluster-template",
        "neutron_management_network":
        None,
        "node_groups": [{
            "count":
            1,
            "created_at":
            "2014-06-04 14:01:06.462512",
            "flavor_id":
            "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            "floating_ip_pool":
            None,
            "image_id":
            None,
            "name":
            "master",
            "node_configs": {},
            "node_group_template_id":
            "c166dfcc-9cc7-4b48-adc9",
            "node_processes": [
                "namenode", "jobtracker", "secondarynamenode", "hiveserver",
                "oozie"
            ],
            "updated_at":
            None,
            "volume_mount_prefix":
            "/volumes/disk",
            "volumes_per_node":
            0,
            "volumes_size":
            0,
            "volumes_availability_zone":
            None,
        }, {
            "count": 2,
            "created_at": "2014-06-04 14:01:06.463214",
            "flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
            "floating_ip_pool": None,
            "image_id": None,
            "name": "workers",
            "node_configs": {},
            "node_group_template_id": "4eb5504c-94c9-4049-a440",
            "node_processes": ["datanode", "tasktracker"],
            "updated_at": None,
            "volume_mount_prefix": "/volumes/disk",
            "volumes_per_node": 0,
            "volumes_size": 0,
            "volumes_availability_zone": None,
        }],
        "plugin_name":
        "vanilla",
        "tenant_id":
        "429ad8447c2d47bc8e0382d244e1d1df",
        "updated_at":
        None
    }

    ct1 = cluster_templates.ClusterTemplate(
        cluster_templates.ClusterTemplateManager(None), ct1_dict)
    TEST.cluster_templates.add(ct1)

    # Clusters.
    cluster1_dict = {
        "anti_affinity": [],
        "cluster_configs": {},
        "cluster_template_id":
        "a2c3743f-31a2-4919-8d02-792138a87a98",
        "created_at":
        "2014-06-04 20:02:14.051328",
        "default_image_id":
        "9eb4643c-dca8-4ea7-92d2-b773f88a8dc6",
        "description":
        "",
        "hadoop_version":
        "1.2.1",
        "id":
        "ec9a0d28-5cfb-4028-a0b5-40afe23f1533",
        "info": {},
        "is_transient":
        False,
        "management_public_key":
        "fakekey",
        "name":
        "cercluster",
        "neutron_management_network":
        None,
        "node_groups": [{
            "count":
            1,
            "created_at":
            "2014-06-04 20:02:14.053153",
            "flavor_id":
            "0",
            "floating_ip_pool":
            None,
            "image_id":
            None,
            "instances": [{
                "created_at": "2014-06-04 20:02:14.834529",
                "id": "c3b8004b-7063-4b99-a082-820cdc6e961c",
                "instance_id": "a45f5495-4a10-4f17-8fae",
                "instance_name": "cercluster-master-001",
                "internal_ip": None,
                "management_ip": None,
                "updated_at": None,
                "volumes": []
            }],
            "name":
            "master",
            "node_configs": {},
            "node_group_template_id":
            "c166dfcc-9cc7-4b48-adc9",
            "node_processes": [
                "namenode", "jobtracker", "secondarynamenode", "hiveserver",
                "oozie"
            ],
            "updated_at":
            "2014-06-04 20:02:14.841760",
            "volume_mount_prefix":
            "/volumes/disk",
            "volumes_per_node":
            0,
            "volumes_size":
            0,
            "security_groups": [],
            "volumes_availability_zone":
            None,
        }, {
            "count":
            2,
            "created_at":
            "2014-06-04 20:02:14.053849",
            "flavor_id":
            "0",
            "floating_ip_pool":
            None,
            "image_id":
            None,
            "instances": [{
                "created_at": "2014-06-04 20:02:15.097655",
                "id": "6a8ae0b1-bb28-4de2-bfbb-bdd3fd2d72b2",
                "instance_id": "38bf8168-fb30-483f-8d52",
                "instance_name": "cercluster-workers-001",
                "internal_ip": None,
                "management_ip": None,
                "updated_at": None,
                "volumes": []
            }, {
                "created_at": "2014-06-04 20:02:15.344515",
                "id": "17b98ed3-a776-467a-90cf-9f46a841790b",
                "instance_id": "85606938-8e53-46a5-a50b",
                "instance_name": "cercluster-workers-002",
                "internal_ip": None,
                "management_ip": None,
                "updated_at": None,
                "volumes": []
            }],
            "name":
            "workers",
            "node_configs": {},
            "node_group_template_id":
            "4eb5504c-94c9-4049-a440",
            "node_processes": ["datanode", "tasktracker"],
            "updated_at":
            "2014-06-04 20:02:15.355745",
            "volume_mount_prefix":
            "/volumes/disk",
            "volumes_per_node":
            0,
            "volumes_size":
            0,
            "security_groups": ["b7857890-09bf-4ee0-a0d5-322d7a6978bf"],
            "volumes_availability_zone":
            None,
        }],
        "plugin_name":
        "vanilla",
        "status":
        "Active",
        "status_description":
        "",
        "tenant_id":
        "429ad8447c2d47bc8e0382d244e1d1df",
        "trust_id":
        None,
        "updated_at":
        "2014-06-04 20:02:15.446087",
        "user_keypair_id":
        "stackboxkp"
    }

    cluster1 = clusters.Cluster(clusters.ClusterManager(None), cluster1_dict)
    TEST.clusters.add(cluster1)

    # Data Sources.
    data_source1_dict = {
        "created_at": "2014-06-04 14:01:10.371562",
        "description": "sample output",
        "id": "426fb01c-5c7e-472d-bba2-b1f0fe7e0ede",
        "name": "sampleOutput",
        "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
        "type": "swift",
        "updated_at": None,
        "url": "swift://example.sahara/output"
    }

    data_source2_dict = {
        "created_at": "2014-06-05 15:01:12.331361",
        "description": "second sample output",
        "id": "ab3413-adfb-bba2-123456785675",
        "name": "sampleOutput2",
        "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
        "type": "hdfs",
        "updated_at": None,
        "url": "hdfs://example.sahara/output"
    }

    data_source1 = data_sources.DataSources(
        data_sources.DataSourceManager(None), data_source1_dict)
    data_source2 = data_sources.DataSources(
        data_sources.DataSourceManager(None), data_source2_dict)
    TEST.data_sources.add(data_source1)
    TEST.data_sources.add(data_source2)

    # Job Binaries.
    job_binary1_dict = {
        "created_at": "2014-06-05 18:15:15.581285",
        "description": "",
        "id": "3f3a07ac-7d6f-49e8-8669-40b25ee891b7",
        "name": "example.pig",
        "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
        "updated_at": None,
        "url": "internal-db://80121dea-f8bd-4ad3-bcc7-096f4bfc722d"
    }

    job_binary2_dict = {
        "created_at": "2014-10-10 13:12:15.583631",
        "description": "Test for spaces in name",
        "id": "abcdef56-1234-abcd-abcd-defabcdaedcb",
        "name": "example with spaces.pig",
        "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
        "updated_at": None,
        "url": "internal-db://abcdef56-1234-abcd-abcd-defabcdaedcb"
    }

    job_binary1 = job_binaries.JobBinaries(
        job_binaries.JobBinariesManager(None), job_binary1_dict)
    job_binary2 = job_binaries.JobBinaries(
        job_binaries.JobBinariesManager(None), job_binary2_dict)

    TEST.job_binaries.add(job_binary1)
    TEST.job_binaries.add(job_binary2)

    # Jobs.
    job1_dict = {
        "created_at":
        "2014-06-05 19:23:59.637165",
        "description":
        "",
        "id":
        "a077b851-46be-4ad7-93c3-2d83894546ef",
        "libs": [{
            "created_at": "2014-06-05 19:23:42.742057",
            "description": "",
            "id": "ab140807-59f8-4235-b4f2-e03daf946256",
            "name": "udf.jar",
            "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
            "updated_at": None,
            "url": "internal-db://d186e2bb-df93-47eb-8c0e-ce21e7ecb78b"
        }],
        "mains": [{
            "created_at": "2014-06-05 18:15:15.581285",
            "description": "",
            "id": "3f3a07ac-7d6f-49e8-8669-40b25ee891b7",
            "name": "example.pig",
            "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
            "updated_at": None,
            "url": "internal-db://80121dea-f8bd-4ad3-bcc7-096f4bfc722d"
        }],
        "name":
        "pigjob",
        "tenant_id":
        "429ad8447c2d47bc8e0382d244e1d1df",
        "type":
        "Pig",
        "updated_at":
        None
    }

    job1 = jobs.Job(jobs.JobsManager(None), job1_dict)
    TEST.jobs.add(job1)

    # Job Executions.
    jobex1_dict = {
        "cluster_id": "ec9a0d28-5cfb-4028-a0b5-40afe23f1533",
        "created_at": "2014-06-05 20:03:06.195937",
        "end_time": None,
        "id": "4b6c1cbf-c713-49d3-8025-808a87c514a6",
        "info": {
            "acl":
            None,
            "actions": [{
                "consoleUrl": "-",
                "cred": "None",
                "data": None,
                "endTime": "Thu,05 Jun 2014 20:03:32 GMT",
                "errorCode": None,
                "errorMessage": None,
                "externalChildIDs": None,
                "externalId": "-",
                "externalStatus": "OK",
                "id": "0000000-140604200538581-oozie-hado-W@:start:",
                "name": ":start:",
                "retries": 0,
                "startTime": "Thu,05 Jun 2014 20:03:32 GMT",
                "stats": None,
                "status": "OK",
                "toString": "Action name[:start:] status[OK]",
                "trackerUri": "-",
                "transition": "job-node",
                "type": ":START:"
            }, {
                "consoleUrl": "fake://console.url",
                "cred": "None",
                "data": None,
                "endTime": None,
                "errorCode": None,
                "errorMessage": None,
                "externalChildIDs": None,
                "externalId": "job_201406042004_0001",
                "externalStatus": "RUNNING",
                "id": "0000000-140604200538581-oozie-hado-W@job-node",
                "name": "job-node",
                "retries": 0,
                "startTime": "Thu,05 Jun 2014 20:03:33 GMT",
                "stats": None,
                "status": "RUNNING",
                "toString": "Action name[job-node] status[RUNNING]",
                "trackerUri": "cercluster-master-001:8021",
                "transition": None,
                "type": "pig"
            }],
            "appName":
            "job-wf",
            "appPath":
            "hdfs://fakepath/workflow.xml",
            "conf":
            "<configuration>fakeconfig</configuration>",
            "consoleUrl":
            "fake://consoleURL",
            "createdTime":
            "Thu,05 Jun 2014 20:03:32 GMT",
            "endTime":
            None,
            "externalId":
            None,
            "group":
            None,
            "id":
            "0000000-140604200538581-oozie-hado-W",
            "lastModTime":
            "Thu,05 Jun 2014 20:03:35 GMT",
            "parentId":
            None,
            "run":
            0,
            "startTime":
            "Thu,05 Jun 2014 20:03:32 GMT",
            "status":
            "RUNNING",
            "toString":
            "Workflow ...status[RUNNING]",
            "user":
            "******"
        },
        "input_id": "85884883-3083-49eb-b442-71dd3734d02c",
        "job_configs": {
            "args": [],
            "configs": {},
            "params": {}
        },
        "job_id": "a077b851-46be-4ad7-93c3-2d83894546ef",
        "oozie_job_id": "0000000-140604200538581-oozie-hado-W",
        "output_id": "426fb01c-5c7e-472d-bba2-b1f0fe7e0ede",
        "progress": None,
        "return_code": None,
        "start_time": "2014-06-05T16:03:32",
        "tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
        "updated_at": "2014-06-05 20:03:46.438248"
    }

    jobex1 = job_executions.JobExecution(
        job_executions.JobExecutionsManager(None), jobex1_dict)
    TEST.job_executions.add(jobex1)
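    # The execution above cross-references fixtures defined earlier in this
    # function: cluster_id matches cluster1, job_id matches job1 and output_id
    # matches data_source1 (input_id refers to an object not defined here).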

    augmented_image = TEST.registered_images.first()
    augmented_image.tags = {}
    augmented_image.username = '******'
    augmented_image.description = 'mydescription'
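    # Hypothetical usage sketch (not part of the original module): a test that
    # loads this data can then assert against the populated containers, e.g.
    #     cluster = TEST.clusters.first()
    #     assert cluster.name == "cercluster"
    #     assert len(cluster.node_groups) == 2
    #     assert TEST.jobs.first().type == "Pig"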
Example No. 20
0
from django.core import urlresolvers
from mock import patch, call  # noqa

from openstack_dashboard.test import helpers
from openstack_dashboard.test.test_data import utils
from tuskar_ui import api
from tuskar_ui.handle_errors import handle_errors  # noqa
from tuskar_ui.test import helpers as test
from tuskar_ui.test.test_data import heat_data
from tuskar_ui.test.test_data import node_data
from tuskar_ui.test.test_data import tuskar_data

INDEX_URL = urlresolvers.reverse('horizon:infrastructure:nodes:index')
REGISTER_URL = urlresolvers.reverse('horizon:infrastructure:nodes:register')
DETAIL_VIEW = 'horizon:infrastructure:nodes:detail'
PERFORMANCE_VIEW = 'horizon:infrastructure:nodes:performance'
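# Shared fixture container, populated once at import time; the node, heat and
# tuskar test-data modules each add their objects so the mocked API calls in
# the tests below can return consistent data.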
TEST_DATA = utils.TestDataContainer()
node_data.data(TEST_DATA)
heat_data.data(TEST_DATA)
tuskar_data.data(TEST_DATA)


class NodesTests(test.BaseAdminViewTests, helpers.APITestCase):
    @handle_errors("Error!", [])
    def _raise_tuskar_exception(self, request, *args, **kwargs):
        raise self.exceptions.tuskar

    def test_index_get(self):

        with patch(
                'tuskar_ui.api.node.Node',
                **{