Example #1
    def cleanup(self):
        # Stop the runner, shutdown the redis thread
        if self.kb_runner:
            try:
                self.kb_runner.dispose()
            except Exception:
                pass

        # Cleanup: start with tested side first
        # then testing side last (order is important because of the shared network)
        cleanup_flag = False
        try:
            cleanup_flag = self.kloud.delete_resources() if not self.storage_mode else True
        except Exception:
            traceback.print_exc()
        if not cleanup_flag:
            LOG.warning('Some resources in server cloud are not cleaned up properly.')
            KBResLogger.dump_and_save('svr', self.kloud.res_logger.resource_list)

        cleanup_flag = False
        try:
            if self.testing_kloud:
                cleanup_flag = self.testing_kloud.delete_resources()
        except Exception:
            traceback.print_exc()
        if self.testing_kloud and not cleanup_flag:
            LOG.warning('Some resources in client cloud are not cleaned up properly.')
            KBResLogger.dump_and_save('clt', self.testing_kloud.res_logger.resource_list)

        # Set the kloud to None
        self.kloud = None
        self.testing_kloud = None
Example #2
    def __init__(self, scale_cfg, cred, reusing_tenants,
                 testing_side=False, storage_mode=False, multicast_mode=False):
        self.tenant_list = []
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        self.reusing_tenants = reusing_tenants
        self.storage_mode = storage_mode
        self.multicast_mode = multicast_mode
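        # single authenticated session reused by the OpenStack client handles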
        self.osclient_session = create_auth_session(cred)
        self.flavor_to_use = None
        self.vm_up_count = 0
        self.res_logger = KBResLogger()
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        # pre-compute the placement az to use for all VMs
        self.placement_az = scale_cfg['availability_zone'] \
            if scale_cfg['availability_zone'] else None
        self.exc_info = None

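        # this handle uses the kloudbuster credentials (usually admin)
        # to create tenants and users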
        self.keystone = keystoneclient.Client(session=self.osclient_session,
                                              endpoint_type='publicURL')

        LOG.info("Creating kloud: " + self.prefix)
        if self.placement_az:
            LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))
Example #3
    def __init__(self,
                 scale_cfg,
                 cred,
                 reusing_tenants,
                 vm_img,
                 testing_side=False,
                 storage_mode=False,
                 multicast_mode=False):
        self.tenant_list = []
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        self.reusing_tenants = reusing_tenants
        self.storage_mode = storage_mode
        self.multicast_mode = multicast_mode
        self.credentials = cred
        self.osclient_session = cred.get_session()
        self.vm_up_count = 0
        self.res_logger = KBResLogger()
        self.vm_img = vm_img
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        # pre-compute the placement az to use for all VMs
        self.placement_az = scale_cfg['availability_zone'] \
            if scale_cfg['availability_zone'] else None
        self.exc_info = None
        # these client handles use the kloudbuster credentials (usually admin)
        # to do tenant creation, tenant nova+cinder quota allocation and the like
        self.keystone = keystoneclient.Client(session=self.osclient_session)
        self.neutron_client = neutronclient.Client(
            '2.0', endpoint_type='publicURL', session=self.osclient_session)
        self.nova_client = novaclient.Client('2',
                                             endpoint_type='publicURL',
                                             session=self.osclient_session)
        self.cinder_client = cinderclient.Client('2',
                                                 endpoint_type='publicURL',
                                                 session=self.osclient_session)
        LOG.info("Creating kloud: " + self.prefix)
        if self.placement_az:
            LOG.info('%s Availability Zone: %s' %
                     (self.name, self.placement_az))
        # A dict of flavors indexed by flavor name
        self.flavors = {}
Example #4
    def cleanup(self):
        # Stop the runner, shutdown the redis thread
        if self.kb_runner:
            try:
                self.kb_runner.dispose()
            except Exception:
                pass

        # Cleanup: start with tested side first
        # then testing side last (order is important because of the shared network)
        cleanup_flag = False
        try:
            cleanup_flag = (self.kloud.delete_resources()
                            if not self.storage_mode else True)
        except Exception:
            traceback.print_exc()
        if not cleanup_flag:
            LOG.warning(
                'Some resources in server cloud are not cleaned up properly.')
            KBResLogger.dump_and_save('svr',
                                      self.kloud.res_logger.resource_list)

        cleanup_flag = False
        try:
            if self.testing_kloud:
                cleanup_flag = self.testing_kloud.delete_resources()
        except Exception:
            traceback.print_exc()
        if self.testing_kloud and not cleanup_flag:
            LOG.warning(
                'Some resources in client cloud are not cleaned up properly.')
            KBResLogger.dump_and_save(
                'clt', self.testing_kloud.res_logger.resource_list)

        # Set the kloud to None
        self.kloud = None
        self.testing_kloud = None
Example #5
    def __init__(self, scale_cfg, cred, reusing_tenants, testing_side=False):
        self.tenant_list = []
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        self.reusing_tenants = reusing_tenants
        self.keystone, self.auth_url = create_keystone_client(cred)
        self.flavor_to_use = None
        self.res_logger = KBResLogger()
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        LOG.info("Creating kloud: " + self.prefix)

        # pre-compute the placement az to use for all VMs
        self.placement_az = None
        if scale_cfg['availability_zone']:
            self.placement_az = scale_cfg['availability_zone']
        LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))
Example #6
class Kloud(object):
    def __init__(self, scale_cfg, cred, reusing_tenants,
                 testing_side=False, storage_mode=False, multicast_mode=False):
        self.tenant_list = []
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        self.reusing_tenants = reusing_tenants
        self.storage_mode = storage_mode
        self.multicast_mode = multicast_mode
        self.osclient_session = create_auth_session(cred)
        self.flavor_to_use = None
        self.vm_up_count = 0
        self.res_logger = KBResLogger()
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        # pre-compute the placement az to use for all VMs
        self.placement_az = scale_cfg['availability_zone'] \
            if scale_cfg['availability_zone'] else None
        self.exc_info = None

        self.keystone = keystoneclient.Client(session=self.osclient_session,
                                              endpoint_type='publicURL')

        LOG.info("Creating kloud: " + self.prefix)
        if self.placement_az:
            LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))

    def create_resources(self, tenant_quota):
        if self.reusing_tenants:
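            # reuse the tenants and users listed in the configuration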
            for tenant_info in self.reusing_tenants:
                tenant_name = tenant_info['name']
                user_list = tenant_info['user']
                tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota,
                                                reusing_users=user_list)
                self.tenant_list.append(tenant_instance)
        else:
            for tenant_count in xrange(self.scale_cfg['number_tenants']):
                tenant_name = self.prefix + "-T" + str(tenant_count)
                tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota)
                self.res_logger.log('tenants', tenant_instance.tenant_name,
                                    tenant_instance.tenant_id)
                self.tenant_list.append(tenant_instance)

        for tenant_instance in self.tenant_list:
            tenant_instance.create_resources()

        if not self.reusing_tenants:
            # Create flavors for servers, clients, and kb-proxy nodes
            nova_client = self.tenant_list[0].user_list[0].nova_client
            flavor_manager = base_compute.Flavor(nova_client)
            flavor_dict = self.scale_cfg.flavor
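            # extra_specs are applied separately via flavor.set_keys()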
            extra_specs = flavor_dict.pop('extra_specs', None)

            if self.storage_mode:
                flavor_dict['ephemeral'] = self.scale_cfg['storage_stage_configs']['disk_size'] \
                    if self.scale_cfg['storage_stage_configs']['target'] == 'ephemeral' else 0
            else:
                flavor_dict['ephemeral'] = 0
            if self.testing_side:
                flv = flavor_manager.create_flavor('KB.proxy', override=True,
                                                   ram=2048, vcpus=1, disk=0, ephemeral=0)
                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
                flv = flavor_manager.create_flavor('KB.client', override=True, **flavor_dict)
                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
            else:
                flv = flavor_manager.create_flavor('KB.server', override=True, **flavor_dict)
                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
            if extra_specs:
                flv.set_keys(extra_specs)

    def delete_resources(self):
        # Deleting flavors created by KloudBuster
        try:
            nova_client = self.tenant_list[0].user_list[0].nova_client
        except Exception:
            # NOVA Client is not yet initialized, so skip cleaning up...
            return True

        flag = True
        if not self.reusing_tenants:
            flavor_manager = base_compute.Flavor(nova_client)
            if self.testing_side:
                flavor_manager.delete_flavor('KB.client')
                flavor_manager.delete_flavor('KB.proxy')
            else:
                flavor_manager.delete_flavor('KB.server')

        for tnt in self.tenant_list:
            flag = flag & tnt.delete_resources()

        return flag

    def get_first_network(self):
        if self.tenant_list:
            return self.tenant_list[0].get_first_network()
        return None

    def get_all_instances(self, include_kb_proxy=False):
        all_instances = []
        for tnt in self.tenant_list:
            all_instances.extend(tnt.get_all_instances())
        if all_instances and not include_kb_proxy \
                and all_instances[-1].vm_name == 'KB-PROXY':
            all_instances.pop()

        return all_instances

    def attach_to_shared_net(self, shared_net):
        # If a shared network exists create a port on this
        # network and attach to router interface
        for tnt in self.tenant_list:
            for usr in tnt.user_list:
                for rtr in usr.router_list:
                    rtr.shared_network = shared_net
                    rtr.attach_router_interface(shared_net, use_port=True)
                    for net in rtr.network_list:
                        for ins in net.instance_list:
                            ins.shared_interface_ip = rtr.shared_interface_ip

    def get_az(self):
        '''Placement algorithm for all VMs created in this kloud
        Return None if placement to be provided by the nova scheduler
        Else return an availability zone to use (e.g. "nova")
        or a compute host to use (e.g. "nova:tme123")
        '''
        return self.placement_az

    def create_vm(self, instance):
        LOG.info("Creating Instance: " + instance.vm_name)
        instance.create_server(**instance.boot_info)
        if not instance.instance:
            raise KBVMCreationException(
                'Instance %s takes too long to become ACTIVE.' % instance.vm_name)

        if instance.vol:
            instance.attach_vol()

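        # first fixed IP on the first network (python 2: dict.values() is indexable)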
        instance.fixed_ip = instance.instance.networks.values()[0][0]
        u_fip = instance.config['use_floatingip']
        if instance.vm_name == "KB-PROXY" and not u_fip and not self.multicast_mode:
            neutron_client = instance.network.router.user.neutron_client
            external_network = base_network.find_external_network(neutron_client)
            instance.fip = base_network.create_floating_ip(neutron_client, external_network)
            instance.fip_ip = instance.fip['floatingip']['floating_ip_address']
            self.res_logger.log('floating_ips',
                                instance.fip['floatingip']['floating_ip_address'],
                                instance.fip['floatingip']['id'])

        if instance.fip:
            # Associate the floating ip with this instance
            instance.instance.add_floating_ip(instance.fip_ip)
            instance.ssh_ip = instance.fip_ip
        else:
            # Store the fixed ip as ssh ip since there is no floating ip
            instance.ssh_ip = instance.fixed_ip

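        # multicast traffic would be filtered by port security, so disable it
        # on every VM except the proxy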
        if not instance.vm_name == "KB-PROXY" and self.multicast_mode:
            nc = instance.network.router.user.neutron_client
            base_network.disable_port_security(nc, instance.fixed_ip)

    def create_vms(self, vm_creation_concurrency):
        try:
            with ThreadPoolExecutor(max_workers=vm_creation_concurrency) as executor:
                for feature in executor.map(self.create_vm, self.get_all_instances()):
                    self.vm_up_count += 1
        except Exception:
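            # create_vms runs in a worker thread; save the exception so the
            # main thread can detect and report the failure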
            self.exc_info = sys.exc_info()
Example #7
class Kloud(object):
    def __init__(self, scale_cfg, cred, reusing_tenants, testing_side=False):
        self.tenant_list = []
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        self.reusing_tenants = reusing_tenants
        self.keystone, self.auth_url = create_keystone_client(cred)
        self.flavor_to_use = None
        self.res_logger = KBResLogger()
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        LOG.info("Creating kloud: " + self.prefix)

        # pre-compute the placement az to use for all VMs
        self.placement_az = None
        if scale_cfg['availability_zone']:
            self.placement_az = scale_cfg['availability_zone']
        LOG.info('%s Availability Zone: %s' % (self.name, self.placement_az))

    def create_resources(self, tenant_quota):
        if self.reusing_tenants:
            for tenant_info in self.reusing_tenants:
                tenant_name = tenant_info['name']
                user_list = tenant_info['user']
                tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota,
                                                reusing_users=user_list)
                self.tenant_list.append(tenant_instance)
        else:
            for tenant_count in xrange(self.scale_cfg['number_tenants']):
                tenant_name = self.prefix + "-T" + str(tenant_count)
                tenant_instance = tenant.Tenant(tenant_name, self, tenant_quota)
                self.res_logger.log('tenants', tenant_instance.tenant_name,
                                    tenant_instance.tenant_id)
                self.tenant_list.append(tenant_instance)

        for tenant_instance in self.tenant_list:
            tenant_instance.create_resources()

        if not self.reusing_tenants:
            # Create flavors for servers, clients, and kb-proxy nodes
            nova_client = self.tenant_list[0].user_list[0].nova_client
            flavor_manager = base_compute.Flavor(nova_client)
            flavor_dict = self.scale_cfg.flavor
            if self.testing_side:
                flv = flavor_manager.create_flavor('kb.client', override=True, **flavor_dict)
                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
                flv = flavor_manager.create_flavor('kb.proxy', override=True,
                                                   ram=2048, vcpus=1, disk=20)
                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])
            else:
                flv = flavor_manager.create_flavor('kb.server', override=True, **flavor_dict)
                self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])

    def delete_resources(self):
        # Deleting flavors created by KloudBuster
        try:
            nova_client = self.tenant_list[0].user_list[0].nova_client
        except Exception:
            # NOVA Client is not yet initialized, so skip cleaning up...
            return

        if not self.reusing_tenants:
            flavor_manager = base_compute.Flavor(nova_client)
            if self.testing_side:
                flavor_manager.delete_flavor('kb.client')
                flavor_manager.delete_flavor('kb.proxy')
            else:
                flavor_manager.delete_flavor('kb.server')

        for tnt in self.tenant_list:
            tnt.delete_resources()

    def get_first_network(self):
        if self.tenant_list:
            return self.tenant_list[0].get_first_network()
        return None

    def get_all_instances(self, include_kb_proxy=False):
        all_instances = []
        for tnt in self.tenant_list:
            all_instances.extend(tnt.get_all_instances())
        if all_instances and not include_kb_proxy \
                and all_instances[-1].vm_name == 'KB-PROXY':
            all_instances.pop()

        return all_instances

    def attach_to_shared_net(self, shared_net):
        # If a shared network exists create a port on this
        # network and attach to router interface
        for tnt in self.tenant_list:
            for usr in tnt.user_list:
                for rtr in usr.router_list:
                    rtr.shared_network = shared_net
                    rtr.attach_router_interface(shared_net, use_port=True)
                    for net in rtr.network_list:
                        for ins in net.instance_list:
                            ins.shared_interface_ip = rtr.shared_interface_ip

    def get_az(self):
        '''Placement algorithm for all VMs created in this kloud
        Return None if placement to be provided by the nova scheduler
        Else return an availability zone to use (e.g. "nova")
        or a compute host to use (e.g. "nova:tme123")
        '''
        return self.placement_az

    def create_vm(self, instance):
        LOG.info("Creating Instance: " + instance.vm_name)
        instance.create_server(**instance.boot_info)
        if not instance.instance:
            raise KBVMCreationException()

        instance.fixed_ip = instance.instance.networks.values()[0][0]
        if (instance.vm_name == "KB-PROXY") and (not instance.config['use_floatingip']):
            neutron_client = instance.network.router.user.neutron_client
            external_network = base_network.find_external_network(neutron_client)
            instance.fip = base_network.create_floating_ip(neutron_client, external_network)
            instance.fip_ip = instance.fip['floatingip']['floating_ip_address']
            self.res_logger.log('floating_ips',
                                instance.fip['floatingip']['floating_ip_address'],
                                instance.fip['floatingip']['id'])

        if instance.fip:
            # Associate the floating ip with this instance
            instance.instance.add_floating_ip(instance.fip_ip)
            instance.ssh_ip = instance.fip_ip
        else:
            # Store the fixed ip as ssh ip since there is no floating ip
            instance.ssh_ip = instance.fixed_ip

    def create_vms(self, vm_creation_concurrency):
        tpool = ThreadPool(processes=vm_creation_concurrency)
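        # map() blocks until create_vm() has completed for every instance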
        tpool.map(self.create_vm, self.get_all_instances())
Example #8
    def run(self):
        """
        The runner for KloudBuster Tests
        """
        vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
        try:
            tenant_quota = self.calc_tenant_quota()
            self.kloud.create_resources(tenant_quota['server'])
            self.testing_kloud.create_resources(tenant_quota['client'])

            # Start the runner and get ready for the incoming redis messages
            client_list = self.testing_kloud.get_all_instances()
            server_list = self.kloud.get_all_instances()

            # Setting up the KloudBuster Proxy node
            self.kb_proxy = client_list[-1]
            client_list.pop()

            self.kb_proxy.vm_name = 'KB-PROXY'
            self.kb_proxy.user_data['role'] = 'KB-PROXY'
            self.kb_proxy.boot_info['flavor_type'] = 'kb.proxy' if \
                not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
            if self.topology:
                proxy_hyper = self.topology.clients_rack.split()[0]
                self.kb_proxy.boot_info['avail_zone'] =\
                    "%s:%s" % (self.testing_kloud.placement_az, proxy_hyper)\
                    if self.testing_kloud.placement_az else "nova:%s" % (proxy_hyper)

            self.kb_proxy.boot_info['user_data'] = str(self.kb_proxy.user_data)
            self.testing_kloud.create_vm(self.kb_proxy)

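            # the runner drives the client VMs through the redis server
            # listening on the proxy (reachable at its floating IP)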
            self.kb_runner = KBRunner(client_list, self.client_cfg,
                                      kb_vm_agent.get_image_version(),
                                      self.single_cloud)
            self.kb_runner.setup_redis(self.kb_proxy.fip_ip)

            if self.single_cloud:
                # When a single cloud hosts both sides, find the shared network
                # and attach the routers in the tested kloud to it
                shared_net = self.testing_kloud.get_first_network()
                self.kloud.attach_to_shared_net(shared_net)

            # Create VMs in both tested and testing kloud concurrently
            self.client_vm_create_thread = threading.Thread(target=self.testing_kloud.create_vms,
                                                            args=[vm_creation_concurrency])
            self.server_vm_create_thread = threading.Thread(target=self.kloud.create_vms,
                                                            args=[vm_creation_concurrency])
            self.client_vm_create_thread.daemon = True
            self.server_vm_create_thread.daemon = True
            if self.single_cloud:
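                # single cloud: bring up server VMs first, then generate the
                # client user data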
                self.gen_user_data("Server")
                self.server_vm_create_thread.start()
                self.server_vm_create_thread.join()
                self.gen_user_data("Client")
                self.client_vm_create_thread.start()
                self.client_vm_create_thread.join()
            else:
                self.gen_user_data("Server")
                self.gen_user_data("Client")
                self.server_vm_create_thread.start()
                self.client_vm_create_thread.start()
                self.server_vm_create_thread.join()
                self.client_vm_create_thread.join()

            # Print all the provisioning info
            self.print_provision_info()

            # Run the runner to perform benchmarkings
            self.kb_runner.run()
            self.final_result = self.kb_runner.tool_result
            self.final_result['total_server_vms'] = len(server_list)
            self.final_result['total_client_vms'] = len(client_list)
            # self.final_result['host_stats'] = self.kb_runner.host_stats
            LOG.info(self.final_result)
        except KeyboardInterrupt:
            traceback.print_exc()
        except (ClientException, Exception):
            traceback.print_exc()

        # Cleanup: start with tested side first
        # then testing side last (order is important because of the shared network)
        if self.server_cfg['cleanup_resources']:
            try:
                self.kloud.delete_resources()
            except Exception:
                traceback.print_exc()
                KBResLogger.dump_and_save('svr', self.kloud.res_logger.resource_list)
        if self.client_cfg['cleanup_resources']:
            try:
                self.testing_kloud.delete_resources()
            except Exception:
                traceback.print_exc()
                KBResLogger.dump_and_save('clt', self.testing_kloud.res_logger.resource_list)
Example #9
class Kloud(object):
    def __init__(self,
                 scale_cfg,
                 cred,
                 reusing_tenants,
                 vm_img,
                 testing_side=False,
                 storage_mode=False,
                 multicast_mode=False):
        self.tenant_list = []
        self.testing_side = testing_side
        self.scale_cfg = scale_cfg
        self.reusing_tenants = reusing_tenants
        self.storage_mode = storage_mode
        self.multicast_mode = multicast_mode
        self.credentials = cred
        self.osclient_session = cred.get_session()
        self.vm_up_count = 0
        self.res_logger = KBResLogger()
        self.vm_img = vm_img
        if testing_side:
            self.prefix = 'KBc'
            self.name = 'Client Kloud'
        else:
            self.prefix = 'KBs'
            self.name = 'Server Kloud'
        # pre-compute the placement az to use for all VMs
        self.placement_az = scale_cfg['availability_zone'] \
            if scale_cfg['availability_zone'] else None
        self.exc_info = None
        # these client handles use the kloudbuster credentials (usually admin)
        # to do tenant creation, tenant nova+cinder quota allocation and the like
        self.keystone = keystoneclient.Client(session=self.osclient_session)
        self.neutron_client = neutronclient.Client(
            '2.0', endpoint_type='publicURL', session=self.osclient_session)
        self.nova_client = novaclient.Client('2',
                                             endpoint_type='publicURL',
                                             session=self.osclient_session)
        self.cinder_client = cinderclient.Client('2',
                                                 endpoint_type='publicURL',
                                                 session=self.osclient_session)
        LOG.info("Creating kloud: " + self.prefix)
        if self.placement_az:
            LOG.info('%s Availability Zone: %s' %
                     (self.name, self.placement_az))
        # A dict of flavors indexed by flavor name
        self.flavors = {}

    def select_flavor(self):
        # Pick an existing flavor that meets the minimum requirements
        flavor_manager = base_compute.Flavor(self.nova_client)
        fcand = {'vcpus': sys.maxint, 'ram': sys.maxint, 'disk': sys.maxint}
        # find the smallest flavor that has at least 1 vcpu, 1024MB ram
        # and 10GB disk (nova flavor disk sizes are in GB)
        find_flag = False
        for flavor in flavor_manager.list():
            flavor = vars(flavor)
            if flavor['vcpus'] < 1 or flavor['ram'] < 1024 or flavor['disk'] < 10:
                continue
            if flavor['vcpus'] < fcand['vcpus']:
                fcand = flavor
            elif flavor['vcpus'] == fcand['vcpus']:
                if flavor['ram'] < fcand['ram']:
                    fcand = flavor
                elif flavor['ram'] == fcand['ram'] and flavor['disk'] < fcand['disk']:
                    fcand = flavor
            find_flag = True

        if find_flag:
            LOG.info('Automatically selecting flavor %s to instantiate VMs.' %
                     fcand['name'])
            return fcand
        LOG.error('Cannot find a flavor which meets the minimum '
                  'requirements to instantiate VMs.')
        raise KBFlavorCheckException()

    def create_resources(self, tenant_quota):
        def create_flavor(fm, name, flavor_dict, extra_specs):
            flavor_dict['name'] = name
            flv = fm.create_flavor(flavor_dict)
            if extra_specs:
                flv.set_keys(extra_specs)
            self.flavors[name] = flv
            self.res_logger.log('flavors', vars(flv)['name'], vars(flv)['id'])

        if self.reusing_tenants:
            for tenant_info in self.reusing_tenants:
                tenant_name = tenant_info['name']
                user_list = tenant_info['user']
                tenant_instance = tenant.Tenant(tenant_name,
                                                self,
                                                tenant_quota,
                                                reusing_users=user_list)
                self.tenant_list.append(tenant_instance)
        else:
            for tenant_count in xrange(self.scale_cfg['number_tenants']):
                tenant_name = self.prefix + "-T" + str(tenant_count)
                tenant_instance = tenant.Tenant(tenant_name, self,
                                                tenant_quota)
                self.res_logger.log('tenants', tenant_instance.tenant_name,
                                    tenant_instance.tenant_id)
                self.tenant_list.append(tenant_instance)

        for tenant_instance in self.tenant_list:
            tenant_instance.create_resources()

        # Create/reuse flavors for this cloud
        if self.reusing_tenants:
            # If tenants are reused, we do not create new flavors but pick one
            # existing that is good enough
            flavor = self.select_flavor()
            if self.testing_side:
                self.flavors[FLAVOR_KB_PROXY] = flavor
                self.flavors[FLAVOR_KB_CLIENT] = flavor
            else:
                self.flavors[FLAVOR_KB_SERVER] = flavor
        else:
            # Create flavors for servers, clients, and kb-proxy nodes
            nova_client = self.tenant_list[0].user_list[0].nova_client
            flavor_manager = base_compute.Flavor(nova_client)
            flavor_dict = self.scale_cfg.flavor
            extra_specs = flavor_dict.pop('extra_specs', None)

            if self.storage_mode:
                flavor_dict['ephemeral'] = self.scale_cfg['storage_stage_configs']['disk_size'] \
                    if self.scale_cfg['storage_stage_configs']['target'] == 'ephemeral' else 0
            else:
                flavor_dict['ephemeral'] = 0
            if self.testing_side:
                proxy_flavor = {
                    "vcpus": 1,
                    "ram": 2048,
                    "disk": 0,
                    "ephemeral": 0
                }
                create_flavor(flavor_manager, FLAVOR_KB_PROXY, proxy_flavor,
                              extra_specs)
                create_flavor(flavor_manager, FLAVOR_KB_CLIENT, flavor_dict,
                              extra_specs)
            else:
                create_flavor(flavor_manager, FLAVOR_KB_SERVER, flavor_dict,
                              extra_specs)

    def delete_resources(self):

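        # only delete flavors that KloudBuster created (none when reusing tenants)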
        if not self.reusing_tenants:
            for fn, flavor in self.flavors.iteritems():
                LOG.info('Deleting flavor %s', fn)
                try:
                    flavor.delete()
                except Exception as exc:
                    LOG.warning('Error deleting flavor %s: %s', fn, str(exc))

        flag = True
        for tnt in self.tenant_list:
            flag = flag & tnt.delete_resources()

        return flag

    def get_first_network(self):
        if self.tenant_list:
            return self.tenant_list[0].get_first_network()
        return None

    def get_all_instances(self, include_kb_proxy=False):
        all_instances = []
        for tnt in self.tenant_list:
            all_instances.extend(tnt.get_all_instances())
        if all_instances and not include_kb_proxy \
                and all_instances[-1].vm_name == 'KB-PROXY':
            all_instances.pop()

        return all_instances

    def attach_to_shared_net(self, shared_net):
        # If a shared network exists create a port on this
        # network and attach to router interface
        for tnt in self.tenant_list:
            for usr in tnt.user_list:
                for rtr in usr.router_list:
                    rtr.shared_network = shared_net
                    rtr.attach_router_interface(shared_net, use_port=True)
                    for net in rtr.network_list:
                        for ins in net.instance_list:
                            ins.shared_interface_ip = rtr.shared_interface_ip

    def get_az(self):
        '''Placement algorithm for all VMs created in this kloud
        Return None if placement to be provided by the nova scheduler
        Else return an availability zone to use (e.g. "nova")
        or a compute host to use (e.g. "nova:tme123")
        '''
        return self.placement_az

    def create_vm(self, instance):
        LOG.info("Creating Instance: " + instance.vm_name)
        instance.create_server(**instance.boot_info)
        if not instance.instance:
            raise KBVMCreationException(
                'Instance %s takes too long to become ACTIVE.' %
                instance.vm_name)

        if instance.vol:
            instance.attach_vol()

        instance.fixed_ip = instance.instance.networks.values()[0][0]
        u_fip = instance.config['use_floatingip']
        if instance.vm_name == "KB-PROXY" and not u_fip and not self.multicast_mode:
            neutron_client = instance.network.router.user.neutron_client
            external_network = base_network.find_external_network(
                neutron_client)
            instance.fip = base_network.create_floating_ip(
                neutron_client, external_network)
            instance.fip_ip = instance.fip['floatingip']['floating_ip_address']
            self.res_logger.log(
                'floating_ips',
                instance.fip['floatingip']['floating_ip_address'],
                instance.fip['floatingip']['id'])

        if instance.fip:
            # Associate the floating ip with this instance
            instance.instance.add_floating_ip(instance.fip_ip)
            instance.ssh_ip = instance.fip_ip
        else:
            # Store the fixed ip as ssh ip since there is no floating ip
            instance.ssh_ip = instance.fixed_ip

        if not instance.vm_name == "KB-PROXY" and self.multicast_mode:
            nc = instance.network.router.user.neutron_client
            base_network.disable_port_security(nc, instance.fixed_ip)

    def create_vms(self, vm_creation_concurrency):
        try:
            with ThreadPoolExecutor(
                    max_workers=vm_creation_concurrency) as executor:
                for feature in executor.map(self.create_vm,
                                            self.get_all_instances()):
                    self.vm_up_count += 1
        except Exception:
            self.exc_info = sys.exc_info()