Ejemplo n.º 1
0
    def stage(self):
        """
        Staging all resources for running KloudBuster Tests

        Creates the tested ("server") and testing ("client") klouds and their
        tenant resources, boots the KB-PROXY VM, instantiates the runner
        matching the selected mode (storage / multicast / HTTP), then creates
        the remaining VMs and prints the provisioning details.
        """
        vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
        tenant_quota = self.calc_tenant_quota()
        # Storage mode only exercises the testing side; skip the tested kloud.
        if not self.storage_mode:
            self.kloud = Kloud(self.server_cfg, self.server_cred, self.tenants_list['server'],
                               storage_mode=self.storage_mode, multicast_mode=self.multicast_mode)
            self.server_vm_create_thread = threading.Thread(target=self.kloud.create_vms,
                                                            args=[vm_creation_concurrency])
            # Daemon thread so a stuck VM creation cannot block interpreter exit.
            self.server_vm_create_thread.daemon = True
        self.testing_kloud = Kloud(self.client_cfg, self.client_cred,
                                   self.tenants_list['client'], testing_side=True,
                                   storage_mode=self.storage_mode,
                                   multicast_mode=self.multicast_mode)
        self.client_vm_create_thread = threading.Thread(target=self.testing_kloud.create_vms,
                                                        args=[vm_creation_concurrency])
        self.client_vm_create_thread.daemon = True

        if not self.storage_mode:
            self.kloud.create_resources(tenant_quota['server'])
        self.testing_kloud.create_resources(tenant_quota['client'])

        # Setting up the KloudBuster Proxy node
        # The last client instance is repurposed as the KB-PROXY and removed
        # from the list of load-generating clients.
        client_list = self.testing_kloud.get_all_instances()
        self.kb_proxy = client_list[-1]
        client_list.pop()

        self.kb_proxy.vm_name = 'KB-PROXY'
        self.kb_proxy.user_data['role'] = 'KB-PROXY'
        # In reusing mode (tenants_list provided) flavor configs are ignored,
        # so fall back to the kloud's pre-selected flavor.
        self.kb_proxy.boot_info['flavor_type'] = 'KB.proxy' if \
            not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
        if self.topology:
            # Pin the proxy to the first hypervisor of the clients' rack.
            proxy_hyper = self.topology.clients_rack[0]
            self.kb_proxy.boot_info['avail_zone'] = \
                "%s:%s" % (self.testing_kloud.placement_az, proxy_hyper) \
                if self.testing_kloud.placement_az else "nova:%s" % (proxy_hyper)

        self.kb_proxy.boot_info['user_data'] = str(self.kb_proxy.user_data)
        self.testing_kloud.create_vm(self.kb_proxy)
        # Pick the runner implementation matching the test mode.
        if self.storage_mode:
            self.kb_runner = KBRunner_Storage(client_list, self.client_cfg,
                                              kb_vm_agent.get_image_version())
        elif self.multicast_mode:
            self.kb_runner = KBRunner_Multicast(client_list, self.client_cfg,
                                                kb_vm_agent.get_image_version(),
                                                self.single_cloud)

        else:
            self.kb_runner = KBRunner_HTTP(client_list, self.client_cfg,
                                           kb_vm_agent.get_image_version(),
                                           self.single_cloud)

        # Prefer the floating IP when available to reach the proxy's redis.
        self.kb_runner.setup_redis(self.kb_proxy.fip_ip or self.kb_proxy.fixed_ip)
        if self.client_cfg.progression['enabled'] and not self.multicast_mode:
            log_info = "Progression run is enabled, KloudBuster will schedule " \
                       "multiple runs as listed:"
            stage = 1
            start = self.client_cfg.progression.vm_start
            multiple = self.client_cfg.progression.vm_multiple
            cur_vm_count = 1 if start else multiple
            total_vm = self.get_tenant_vm_count(self.client_cfg)
            # Preview every progression stage's VM count in a single log entry.
            while (cur_vm_count <= total_vm):
                log_info += "\n" + self.kb_runner.header_formatter(stage, cur_vm_count)
                cur_vm_count = (stage + 1 - start) * multiple
                stage += 1
            LOG.info(log_info)

        if self.single_cloud and not self.storage_mode and not self.multicast_mode:
            # Find the shared network if the cloud used to testing is same
            # Attach the router in tested kloud to the shared network
            shared_net = self.testing_kloud.get_first_network()
            self.kloud.attach_to_shared_net(shared_net)

        # Create VMs in both tested and testing kloud concurrently
        user_data_mode = "multicast" if self.multicast_mode else "http"
        if self.storage_mode:
            self.gen_client_user_data("storage")
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        elif self.single_cloud:
            # Single cloud: server VMs must exist before the clients boot.
            self.gen_server_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.gen_client_user_data(user_data_mode)
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        else:
            # Dual cloud: server and client VM creation can fully overlap.
            self.gen_server_user_data(user_data_mode)
            self.gen_client_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.client_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.client_vm_create_thread.join()

        # Re-raise any exception captured by the creation threads, preserving
        # the original traceback (Python 2 three-expression raise).
        if self.testing_kloud and self.testing_kloud.exc_info:
            raise self.testing_kloud.exc_info[1], None, self.testing_kloud.exc_info[2]

        if self.kloud and self.kloud.exc_info:
            raise self.kloud.exc_info[1], None, self.kloud.exc_info[2]

        # Function that print all the provisioning info
        self.print_provision_info()
Ejemplo n.º 2
0
    def stage(self):
        """
        Staging all resources for running KloudBuster Tests

        Creates the tested ("server") and testing ("client") klouds with
        their VM images, boots the KB-PROXY VM, instantiates the runner
        matching the selected mode (storage / multicast / HTTP), then
        creates the remaining VMs and prints the provisioning details.
        """
        vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
        tenant_quota = self.calc_tenant_quota()
        # Storage mode only exercises the testing side; skip the tested kloud.
        if not self.storage_mode:
            self.kloud = Kloud(self.server_cfg,
                               self.server_cred,
                               self.tenants_list['server'],
                               self.server_vm_img,
                               storage_mode=self.storage_mode,
                               multicast_mode=self.multicast_mode)
            self.server_vm_create_thread = threading.Thread(
                target=self.kloud.create_vms, args=[vm_creation_concurrency])
            # Daemon thread so a stuck VM creation cannot block interpreter exit.
            self.server_vm_create_thread.daemon = True
        self.testing_kloud = Kloud(self.client_cfg,
                                   self.client_cred,
                                   self.tenants_list['client'],
                                   self.client_vm_img,
                                   testing_side=True,
                                   storage_mode=self.storage_mode,
                                   multicast_mode=self.multicast_mode)
        self.client_vm_create_thread = threading.Thread(
            target=self.testing_kloud.create_vms,
            args=[vm_creation_concurrency])
        self.client_vm_create_thread.daemon = True

        if not self.storage_mode:
            self.kloud.create_resources(tenant_quota['server'])
        self.testing_kloud.create_resources(tenant_quota['client'])

        # Setting up the KloudBuster Proxy node
        # The last client instance is repurposed as the KB-PROXY and removed
        # from the list of load-generating clients.
        client_list = self.testing_kloud.get_all_instances()
        self.kb_proxy = client_list[-1]
        client_list.pop()

        self.kb_proxy.vm_name = 'KB-PROXY'
        self.kb_proxy.user_data['role'] = 'KB-PROXY'
        self.kb_proxy.boot_info['flavor_type'] = FLAVOR_KB_PROXY
        if self.topology:
            # Pin the proxy to the first hypervisor of the clients' rack.
            proxy_hyper = self.topology.clients_rack[0]
            self.kb_proxy.boot_info['avail_zone'] = \
                "%s:%s" % (self.testing_kloud.placement_az, proxy_hyper) \
                if self.testing_kloud.placement_az else "nova:%s" % (proxy_hyper)

        self.kb_proxy.boot_info['user_data'] = str(self.kb_proxy.user_data)
        self.testing_kloud.create_vm(self.kb_proxy)
        # Pick the runner implementation matching the test mode.
        if self.storage_mode:
            self.kb_runner = KBRunner_Storage(client_list, self.client_cfg)
        elif self.multicast_mode:
            self.kb_runner = KBRunner_Multicast(client_list, self.client_cfg,
                                                self.single_cloud)

        else:
            self.kb_runner = KBRunner_HTTP(client_list, self.client_cfg,
                                           self.single_cloud)

        # Prefer the floating IP when available to reach the proxy's redis.
        self.kb_runner.setup_redis(self.kb_proxy.fip_ip
                                   or self.kb_proxy.fixed_ip)
        if self.client_cfg.progression['enabled'] and not self.multicast_mode:
            log_info = "Progression run is enabled, KloudBuster will schedule " \
                       "multiple runs as listed:"
            stage = 1
            start = self.client_cfg.progression.vm_start
            multiple = self.client_cfg.progression.vm_multiple
            cur_vm_count = 1 if start else multiple
            # Minus 1 for KB-Proxy
            total_vm = self.get_tenant_vm_count(self.client_cfg) - 1
            # Preview every progression stage's VM count in a single log entry.
            while (cur_vm_count <= total_vm):
                log_info += "\n" + self.kb_runner.header_formatter(
                    stage, cur_vm_count)
                cur_vm_count = (stage + 1 - start) * multiple
                stage += 1
            LOG.info(log_info)

        if self.single_cloud and not self.storage_mode and not self.multicast_mode:
            # Find the shared network if the cloud used to testing is same
            # Attach the router in tested kloud to the shared network
            shared_net = self.testing_kloud.get_first_network()
            self.kloud.attach_to_shared_net(shared_net)

        # Create VMs in both tested and testing kloud concurrently
        user_data_mode = "multicast" if self.multicast_mode else "http"
        if self.storage_mode:
            self.gen_client_user_data("storage")
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        elif self.single_cloud:
            # Single cloud: server VMs must exist before the clients boot.
            self.gen_server_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.gen_client_user_data(user_data_mode)
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        else:
            # Dual cloud: server and client VM creation can fully overlap.
            self.gen_server_user_data(user_data_mode)
            self.gen_client_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.client_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.client_vm_create_thread.join()

        # Re-raise any exception captured by the creation threads, preserving
        # the original traceback (Python 2 three-expression raise).
        if self.testing_kloud and self.testing_kloud.exc_info:
            raise self.testing_kloud.exc_info[
                1], None, self.testing_kloud.exc_info[2]

        if self.kloud and self.kloud.exc_info:
            raise self.kloud.exc_info[1], None, self.kloud.exc_info[2]

        # Function that print all the provisioning info
        self.print_provision_info()
Ejemplo n.º 3
0
class KloudBuster(object):
    """
    Creates resources on the cloud for loading up the cloud
    1. Tenants
    2. Users per tenant
    3. Routers per user
    4. Networks per router
    5. Instances per network
    """

    def __init__(self, server_cred, client_cred, server_cfg, client_cfg,
                 topology, tenants_list, storage_mode=False, multicast_mode=False):
        """
        Create a KloudBuster session.

        server_cred/client_cred: credentials of the tested/testing clouds.
            client_cred may be None to reuse server_cred (single-cloud mode).
        server_cfg/client_cfg: configs of the tested/testing clouds.
        topology: placement topology; ignored when tenants_list is given.
        tenants_list: existing tenant/user names to reuse, or None.
        storage_mode/multicast_mode: select the benchmark type.
        """
        # List of tenant objects to keep track of all tenants
        self.server_cred = server_cred
        self.client_cred = client_cred
        self.server_cfg = server_cfg
        self.client_cfg = client_cfg
        self.storage_mode = storage_mode
        self.multicast_mode = multicast_mode


        # Reusing mode (tenants_list given) is incompatible with explicit
        # topology placement: topology is dropped in that case.
        if topology and tenants_list:
            self.topology = None
            LOG.warning("REUSING MODE: Topology configs will be ignored.")
        else:
            self.topology = topology
        if tenants_list:
            self.tenants_list = {}
            self.tenants_list['server'] = \
                [{'name': tenants_list['tenant_name'], 'user': tenants_list['server_user']}]
            self.tenants_list['client'] = \
                [{'name': tenants_list['tenant_name'], 'user': tenants_list['client_user']}]
            LOG.warning("REUSING MODE: The quotas will not be adjusted automatically.")
            LOG.warning("REUSING MODE: The flavor configs will be ignored.")
        else:
            self.tenants_list = {'server': None, 'client': None}
        # TODO(check on same auth_url instead)
        self.single_cloud = False if client_cred else True
        if not client_cred:
            self.client_cred = server_cred
        # Automatically enable the floating IP for server cloud under dual-cloud mode
        if not self.single_cloud and not self.server_cfg['use_floatingip']:
            self.server_cfg['use_floatingip'] = True
            LOG.info('Automatically setting "use_floatingip" to True for server cloud...')

        # Runtime state filled in by stage()/run().
        self.kb_proxy = None
        self.final_result = {}
        self.server_vm_create_thread = None
        self.client_vm_create_thread = None
        self.kb_runner = None
        self.fp_logfile = None
        self.kloud = None
        self.testing_kloud = None

    def get_hypervisor_list(self, cred):
        """Return the hostnames of all enabled hypervisors visible with cred."""
        ret_list = []
        sess = create_auth_session(cred)
        nova_client = novaclient('2', endpoint_type='publicURL',
                                 http_log_debug=True, session=sess)
        for hypervisor in nova_client.hypervisors.list():
            if vars(hypervisor)['status'] == 'enabled':
                ret_list.append(vars(hypervisor)['hypervisor_hostname'])

        return ret_list

    def get_az_list(self, cred):
        """Return the names of all available availability zones except 'internal'."""
        ret_list = []
        sess = create_auth_session(cred)
        nova_client = novaclient('2', endpoint_type='publicURL',
                                 http_log_debug=True, session=sess)
        for az in nova_client.availability_zones.list():
            zoneName = vars(az)['zoneName']
            isAvail = vars(az)['zoneState']['available']
            if zoneName != 'internal' and isAvail:
                ret_list.append(zoneName)

        return ret_list

    def check_and_upload_images(self, retry_count=150):
        """
        Make sure the KloudBuster VM image exists in both clouds.

        If missing, upload it from the OpenStack App Store and poll up to
        retry_count times (2s apart) until the image becomes active.
        Returns True on success, False on any upload failure.
        """
        retry = 0
        creds_list = [create_auth_session(self.server_cred),
                      create_auth_session(self.client_cred)]
        creds_dict = dict(zip(['Server kloud', 'Client kloud'], creds_list))
        img_name_dict = dict(zip(['Server kloud', 'Client kloud'],
                                 [self.server_cfg.image_name, self.client_cfg.image_name]))

        for kloud, sess in creds_dict.items():
            glance_client = glanceclient.Client('1', session=sess)
            try:
                # Search for the image
                # (Python 2 iterator .next(); StopIteration means not found)
                img = glance_client.images.list(filters={'name': img_name_dict[kloud]}).next()
                # Image already present in this kloud; nothing to upload.
                continue
            except StopIteration:
                pass

            # Trying to upload images
            kb_image_name = kb_vm_agent.get_image_name() + '.qcow2'
            image_url = 'http://storage.apps.openstack.org/images/%s' % kb_image_name
            LOG.info("Image is not found in %s, uploading from OpenStack App Store..." % kloud)
            try:
                img = glance_client.images.create(name=img_name_dict[kloud],
                                                  disk_format="qcow2",
                                                  container_format="bare",
                                                  is_public=True,
                                                  copy_from=image_url)
                # Poll until the async copy completes or the retry budget runs out.
                while img.status in ['queued', 'saving'] and retry < retry_count:
                    img = glance_client.images.find(name=img.name)
                    retry = retry + 1
                    time.sleep(2)
                if img.status != 'active':
                    raise Exception
            except glance_exception.HTTPForbidden:
                LOG.error("Cannot upload image without admin access. Please make sure the "
                          "image is uploaded and is either public or owned by you.")
                return False
            except Exception:
                LOG.error("Failed while uploading the image, please make sure the cloud "
                          "under test has the access to URL: %s." % image_url)
                return False

        return True

    def print_provision_info(self):
        """
        Function that iterates and prints all VM info
        for tested and testing cloud
        """
        if not self.storage_mode:
            table = [["VM Name", "Host", "Internal IP", "Floating IP", "Subnet",
                      "Shared Interface IP"]]
            client_list = self.kloud.get_all_instances()
            for instance in client_list:
                row = [instance.vm_name, instance.host, instance.fixed_ip,
                       instance.fip_ip, instance.subnet_ip, instance.shared_interface_ip]
                table.append(row)
            LOG.info('Provision Details (Tested Kloud)\n' +
                     tabulate(table, headers="firstrow", tablefmt="psql"))

        table = [["VM Name", "Host", "Internal IP", "Floating IP", "Subnet"]]
        client_list = self.testing_kloud.get_all_instances(include_kb_proxy=True)
        for instance in client_list:
            row = [instance.vm_name, instance.host, instance.fixed_ip,
                   instance.fip_ip, instance.subnet_ip]
            table.append(row)
        LOG.info('Provision Details (Testing Kloud)\n' +
                 tabulate(table, headers="firstrow", tablefmt="psql"))

    def gen_server_user_data(self, test_mode):
        """
        Fill in user_data (role, configs) and boot_info (flavor, user_data)
        for every server-side VM, and resolve their hypervisor placement.
        test_mode is 'http' or 'multicast'.
        """
        LOG.info("Preparing metadata for VMs... (Server)")
        server_list = self.kloud.get_all_instances()
        idx = 0
        KBScheduler.setup_vm_placement('Server', server_list, self.topology,
                                       self.kloud.placement_az, "Round-robin")
        if test_mode == 'http':
            for ins in server_list:
                ins.user_data['role'] = 'HTTP_Server'
                ins.user_data['http_server_configs'] = ins.config['http_server_configs']
                # Reusing mode ignores flavor configs; use the kloud's flavor.
                ins.boot_info['flavor_type'] = 'KB.server' if \
                    not self.tenants_list['server'] else self.kloud.flavor_to_use
                ins.boot_info['user_data'] = str(ins.user_data)
        elif test_mode == 'multicast':
            # Nuttcp tests over first /25
            # Multicast Listeners over second /25
            mc_ad_st = self.client_cfg['multicast_tool_configs']['multicast_address_start']
            listener_addr_start = mc_ad_st.split(".")
            listener_addr_start[-1] = "128"
            naddrs = self.client_cfg['multicast_tool_configs']['addresses'][-1]
            clocks = " ".join(self.client_cfg['multicast_tool_configs']['ntp_clocks'])
            nports = self.client_cfg['multicast_tool_configs']['ports'][-1]
            cfgs = self.client_cfg['multicast_tool_configs']
            listener_addr_start = ".".join(listener_addr_start)
            for ins in server_list:
                ins.user_data['role'] = 'Multicast_Server'
                # n_id: sequential id distinguishing multicast servers.
                ins.user_data['n_id'] = idx
                idx += 1
                ins.user_data['multicast_server_configs'] = cfgs
                ins.user_data['multicast_addresses'] = naddrs
                ins.user_data['multicast_ports'] = nports
                ins.user_data['multicast_start_address'] = mc_ad_st
                ins.user_data['multicast_listener_address_start'] = listener_addr_start
                ins.user_data['ntp_clocks'] = clocks
                ins.user_data['pktsizes'] = self.client_cfg.multicast_tool_configs.pktsizes
                ins.boot_info['flavor_type'] = 'KB.server' if \
                    not self.tenants_list['server'] else self.kloud.flavor_to_use
                ins.boot_info['user_data'] = str(ins.user_data)

    def gen_client_user_data(self, test_mode):
        """
        Fill in user_data and boot_info for every client-side VM, including
        the KB-Proxy redis endpoint and, for non-storage modes, the mapping
        of each client to its target server VM.
        test_mode is 'http', 'multicast' or 'storage'.
        """
        LOG.info("Preparing metadata for VMs... (Client)")
        client_list = self.testing_kloud.get_all_instances()
        KBScheduler.setup_vm_placement('Client', client_list, self.topology,
                                       self.testing_kloud.placement_az, "Round-robin")
        if test_mode != 'storage':
            role = 'HTTP_Client' if test_mode == 'http' else 'Multicast_Client'
            # HTTP pairs one client per server; multicast fans out 1:n.
            algo = '1:1' if test_mode == 'http' else '1:n'
            server_list = self.kloud.get_all_instances()
            clocks = " ".join(self.client_cfg['multicast_tool_configs']['ntp_clocks'])
            KBScheduler.setup_vm_mappings(client_list, server_list, algo)
            for idx, ins in enumerate(client_list):
                ins.user_data['role'] = role
                ins.user_data['vm_name'] = ins.vm_name
                ins.user_data['redis_server'] = self.kb_proxy.fixed_ip
                ins.user_data['redis_server_port'] = 6379
                ins.user_data['target_subnet_ip'] = server_list[idx].subnet_ip
                ins.user_data['target_shared_interface_ip'] = server_list[idx].shared_interface_ip
                if role == 'Multicast_Client':
                    ins.user_data['ntp_clocks'] = clocks
                # Reusing mode ignores flavor configs; use the kloud's flavor.
                ins.boot_info['flavor_type'] = 'KB.client' if \
                    not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
                ins.boot_info['user_data'] = str(ins.user_data)
        else:
            for idx, ins in enumerate(client_list):
                ins.user_data['role'] = 'Storage_Client'
                ins.user_data['vm_name'] = ins.vm_name
                ins.user_data['redis_server'] = self.kb_proxy.fixed_ip
                ins.user_data['redis_server_port'] = 6379
                ins.boot_info['flavor_type'] = 'KB.client' if \
                    not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
                ins.boot_info['user_data'] = str(ins.user_data)

    def gen_metadata(self):
        """Reset final_result with run metadata: time, test mode, version."""
        self.final_result = {}
        self.final_result['time'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.final_result['test_mode'] = 'storage' if self.storage_mode else 'http'
        if self.storage_mode:
            self.final_result['storage_target'] = self.client_cfg.storage_stage_configs.target
        if self.multicast_mode:
            self.final_result['test_mode'] = 'multicast'
        self.final_result['version'] = __version__
        self.final_result['kb_result'] = []

    def run(self):
        """
        Main entry point: stage all resources, then run the test.
        Cleanup happens at the end only if both configs request it.
        """
        try:
            self.stage()
            self.run_test()
        except KBException as e:
            LOG.error(e.message)
        except base_network.KBGetProvNetException:
            pass
        except Exception:
            traceback.print_exc()
        except KeyboardInterrupt:
            LOG.info('Terminating KloudBuster...')
        finally:
            if self.server_cfg['cleanup_resources'] and self.client_cfg['cleanup_resources']:
                self.cleanup()

    def stage(self):
        """
        Staging all resources for running KloudBuster Tests

        Creates the tested ("server") and testing ("client") klouds and
        their tenant resources, boots the KB-PROXY VM, instantiates the
        runner matching the selected mode, then creates the remaining VMs
        and prints the provisioning details.
        """
        vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
        tenant_quota = self.calc_tenant_quota()
        # Storage mode only exercises the testing side; skip the tested kloud.
        if not self.storage_mode:
            self.kloud = Kloud(self.server_cfg, self.server_cred, self.tenants_list['server'],
                               storage_mode=self.storage_mode, multicast_mode=self.multicast_mode)
            self.server_vm_create_thread = threading.Thread(target=self.kloud.create_vms,
                                                            args=[vm_creation_concurrency])
            # Daemon thread so a stuck VM creation cannot block interpreter exit.
            self.server_vm_create_thread.daemon = True
        self.testing_kloud = Kloud(self.client_cfg, self.client_cred,
                                   self.tenants_list['client'], testing_side=True,
                                   storage_mode=self.storage_mode,
                                   multicast_mode=self.multicast_mode)
        self.client_vm_create_thread = threading.Thread(target=self.testing_kloud.create_vms,
                                                        args=[vm_creation_concurrency])
        self.client_vm_create_thread.daemon = True

        if not self.storage_mode:
            self.kloud.create_resources(tenant_quota['server'])
        self.testing_kloud.create_resources(tenant_quota['client'])

        # Setting up the KloudBuster Proxy node
        # The last client instance is repurposed as the KB-PROXY and removed
        # from the list of load-generating clients.
        client_list = self.testing_kloud.get_all_instances()
        self.kb_proxy = client_list[-1]
        client_list.pop()

        self.kb_proxy.vm_name = 'KB-PROXY'
        self.kb_proxy.user_data['role'] = 'KB-PROXY'
        self.kb_proxy.boot_info['flavor_type'] = 'KB.proxy' if \
            not self.tenants_list['client'] else self.testing_kloud.flavor_to_use
        if self.topology:
            # Pin the proxy to the first hypervisor of the clients' rack.
            proxy_hyper = self.topology.clients_rack[0]
            self.kb_proxy.boot_info['avail_zone'] = \
                "%s:%s" % (self.testing_kloud.placement_az, proxy_hyper) \
                if self.testing_kloud.placement_az else "nova:%s" % (proxy_hyper)

        self.kb_proxy.boot_info['user_data'] = str(self.kb_proxy.user_data)
        self.testing_kloud.create_vm(self.kb_proxy)
        # Pick the runner implementation matching the test mode.
        if self.storage_mode:
            self.kb_runner = KBRunner_Storage(client_list, self.client_cfg,
                                              kb_vm_agent.get_image_version())
        elif self.multicast_mode:
            self.kb_runner = KBRunner_Multicast(client_list, self.client_cfg,
                                                kb_vm_agent.get_image_version(),
                                                self.single_cloud)

        else:
            self.kb_runner = KBRunner_HTTP(client_list, self.client_cfg,
                                           kb_vm_agent.get_image_version(),
                                           self.single_cloud)

        # Prefer the floating IP when available to reach the proxy's redis.
        self.kb_runner.setup_redis(self.kb_proxy.fip_ip or self.kb_proxy.fixed_ip)
        if self.client_cfg.progression['enabled'] and not self.multicast_mode:
            log_info = "Progression run is enabled, KloudBuster will schedule " \
                       "multiple runs as listed:"
            stage = 1
            start = self.client_cfg.progression.vm_start
            multiple = self.client_cfg.progression.vm_multiple
            cur_vm_count = 1 if start else multiple
            total_vm = self.get_tenant_vm_count(self.client_cfg)
            # Preview every progression stage's VM count in a single log entry.
            while (cur_vm_count <= total_vm):
                log_info += "\n" + self.kb_runner.header_formatter(stage, cur_vm_count)
                cur_vm_count = (stage + 1 - start) * multiple
                stage += 1
            LOG.info(log_info)

        if self.single_cloud and not self.storage_mode and not self.multicast_mode:
            # Find the shared network if the cloud used to testing is same
            # Attach the router in tested kloud to the shared network
            shared_net = self.testing_kloud.get_first_network()
            self.kloud.attach_to_shared_net(shared_net)

        # Create VMs in both tested and testing kloud concurrently
        user_data_mode = "multicast" if self.multicast_mode else "http"
        if self.storage_mode:
            self.gen_client_user_data("storage")
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        elif self.single_cloud:
            # Single cloud: server VMs must exist before the clients boot.
            self.gen_server_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.gen_client_user_data(user_data_mode)
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        else:
            # Dual cloud: server and client VM creation can fully overlap.
            self.gen_server_user_data(user_data_mode)
            self.gen_client_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.client_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.client_vm_create_thread.join()

        # Re-raise any exception captured by the creation threads, preserving
        # the original traceback (Python 2 three-expression raise).
        if self.testing_kloud and self.testing_kloud.exc_info:
            raise self.testing_kloud.exc_info[1], None, self.testing_kloud.exc_info[2]

        if self.kloud and self.kloud.exc_info:
            raise self.kloud.exc_info[1], None, self.kloud.exc_info[2]

        # Function that print all the provisioning info
        self.print_provision_info()

    def run_test(self, test_only=False):
        """Run the benchmark via the runner, collecting results into final_result."""
        self.gen_metadata()
        self.kb_runner.config = self.client_cfg
        # Run the runner to perform benchmarkings
        for run_result in self.kb_runner.run(test_only):
            # Multicast mode only keeps the first result set.
            if not self.multicast_mode or len(self.final_result['kb_result']) == 0:
                self.final_result['kb_result'].append(self.kb_runner.tool_result)
        LOG.info('SUMMARY: %s' % self.final_result)

    def stop_test(self):
        """Stop the currently running test."""
        self.kb_runner.stop()
        LOG.info('Testing is stopped by request.')

    def cleanup(self):
        """
        Delete all cloud resources created by this session, tested side
        first then testing side (the shared network order matters), and
        dump the resource list of any kloud that fails to clean up fully.
        """
        # Stop the runner, shutdown the redis thread
        if self.kb_runner:
            try:
                self.kb_runner.dispose()
            except Exception:
                pass

        # Cleanup: start with tested side first
        # then testing side last (order is important because of the shared network)
        cleanup_flag = False
        try:
            cleanup_flag = self.kloud.delete_resources() if not self.storage_mode else True
        except Exception:
            traceback.print_exc()
        if not cleanup_flag:
            LOG.warning('Some resources in server cloud are not cleaned up properly.')
            KBResLogger.dump_and_save('svr', self.kloud.res_logger.resource_list)

        cleanup_flag = False
        try:
            cleanup_flag = self.testing_kloud.delete_resources()
        except Exception:
            traceback.print_exc()
        if not cleanup_flag:
            LOG.warning('Some resources in client cloud are not cleaned up properly.')
            KBResLogger.dump_and_save('clt', self.testing_kloud.res_logger.resource_list)

        # Set the kloud to None
        self.kloud = None
        self.testing_kloud = None


    def dump_logs(self, offset=0):
        """Return log file content from the given byte offset, or '' if no log."""
        if not self.fp_logfile:
            return ""

        self.fp_logfile.seek(offset)
        return self.fp_logfile.read()

    def dispose(self):
        """Close and delete the KloudBuster log file."""
        if self.fp_logfile:
            self.fp_logfile.close()
        logging.delete_logfile('kloudbuster', self.fp_logfile.name)
        self.fp_logfile = None

    def get_tenant_vm_count(self, config):
        """Return how many VMs one tenant creates under the given config."""
        return (config['routers_per_tenant'] * config['networks_per_router'] *
                config['vms_per_network'])

    def calc_neutron_quota(self):
        """
        Compute the neutron quotas (network/subnet/router/floatingip/port/
        security groups) needed by each side.
        Returns [server_quota, client_quota].
        """
        total_vm = self.get_tenant_vm_count(self.server_cfg)

        server_quota = {}
        server_quota['network'] = self.server_cfg['routers_per_tenant'] * \
            self.server_cfg['networks_per_router']
        server_quota['subnet'] = server_quota['network']
        server_quota['router'] = self.server_cfg['routers_per_tenant']
        if (self.server_cfg['use_floatingip']):
            # (1) Each VM has one floating IP
            # (2) Each Router has one external IP
            server_quota['floatingip'] = total_vm + server_quota['router']
            # (1) Each VM Floating IP takes up 1 port, total of $total_vm port(s)
            # (2) Each VM Fixed IP takes up 1 port, total of $total_vm port(s)
            # (3) Each Network has one router_interface (gateway), and one DHCP agent, total of
            #     server_quota['network'] * 2 port(s)
            # (4) Each Router has one external IP, takes up 1 port, total of
            #     server_quota['router'] port(s)
            server_quota['port'] = 2 * total_vm + 2 * server_quota['network'] + \
                server_quota['router'] + 10
        else:
            server_quota['floatingip'] = server_quota['router']
            server_quota['port'] = total_vm + 2 * server_quota['network'] + \
                server_quota['router'] + 10
        server_quota['security_group'] = server_quota['network'] + 1
        server_quota['security_group_rule'] = server_quota['security_group'] * 10

        client_quota = {}
        total_vm = self.get_tenant_vm_count(self.client_cfg)
        # The client side always uses a single router/network/subnet.
        client_quota['network'] = 1
        client_quota['subnet'] = 1
        client_quota['router'] = 1
        if (self.client_cfg['use_floatingip']):
            # (1) Each VM has one floating IP
            # (2) Each Router has one external IP, total of 1 router
            # (3) KB-Proxy node has one floating IP
            client_quota['floatingip'] = total_vm + 1 + 1
            # (1) Each VM Floating IP takes up 1 port, total of $total_vm port(s)
            # (2) Each VM Fixed IP takes up 1 port, total of $total_vm port(s)
            # (3) Each Network has one router_interface (gateway), and one DHCP agent, total of
            #     client_quota['network'] * 2 port(s)
            # (4) KB-Proxy node takes up 2 ports, one for fixed IP, one for floating IP
            # (5) Each Router has one external IP, takes up 1 port, total of 1 router/port
            client_quota['port'] = 2 * total_vm + 2 * client_quota['network'] + 2 + 1 + 10
        else:
            client_quota['floatingip'] = 1 + 1
            client_quota['port'] = total_vm + 2 * client_quota['network'] + 2 + 1
        if self.single_cloud:
            # Under single-cloud mode, the shared network is attached to every router in server
            # cloud, and each one takes up 1 port on client side.
            client_quota['port'] = client_quota['port'] + server_quota['router'] + 10
        client_quota['security_group'] = client_quota['network'] + 1
        client_quota['security_group_rule'] = client_quota['security_group'] * 10

        return [server_quota, client_quota]

    def calc_nova_quota(self):
        """
        Compute the nova quotas (instances/cores/ram) needed by each side.
        Returns [server_quota, client_quota].
        """
        total_vm = self.get_tenant_vm_count(self.server_cfg)
        server_quota = {}
        server_quota['instances'] = total_vm
        server_quota['cores'] = total_vm * self.server_cfg['flavor']['vcpus']
        server_quota['ram'] = total_vm * self.server_cfg['flavor']['ram']

        client_quota = {}
        total_vm = self.get_tenant_vm_count(self.client_cfg)
        # The extra instance/core and 2048MB of RAM account for the KB-Proxy VM.
        client_quota['instances'] = total_vm + 1
        client_quota['cores'] = total_vm * self.client_cfg['flavor']['vcpus'] + 1
        client_quota['ram'] = total_vm * self.client_cfg['flavor']['ram'] + 2048

        return [server_quota, client_quota]

    def calc_cinder_quota(self):
        """
        Compute the cinder quotas (gigabytes/volumes) needed by each side.
        A flavor disk size of 0 is budgeted as 20GB per VM.
        Returns [server_quota, client_quota].
        """
        total_vm = self.get_tenant_vm_count(self.server_cfg)
        svr_disk = self.server_cfg['flavor']['disk']\
            if self.server_cfg['flavor']['disk'] != 0 else 20
        server_quota = {}
        server_quota['gigabytes'] = total_vm * svr_disk
        server_quota['volumes'] = total_vm

        total_vm = self.get_tenant_vm_count(self.client_cfg)
        clt_disk = self.client_cfg['flavor']['disk']\
            if self.client_cfg['flavor']['disk'] != 0 else 20
        client_quota = {}
        client_quota['gigabytes'] = total_vm * clt_disk + 20
        client_quota['volumes'] = total_vm

        return [server_quota, client_quota]

    def calc_tenant_quota(self):
        """
        Aggregate the nova, neutron and cinder quotas into one dict keyed
        by side: {'server': {...}, 'client': {...}}.
        """
        quota_dict = {'server': {}, 'client': {}}
        nova_quota = self.calc_nova_quota()
        neutron_quota = self.calc_neutron_quota()
        cinder_quota = self.calc_cinder_quota()
        for idx, val in enumerate(['server', 'client']):
            quota_dict[val]['nova'] = nova_quota[idx]
            quota_dict[val]['neutron'] = neutron_quota[idx]
            quota_dict[val]['cinder'] = cinder_quota[idx]

        return quota_dict
Ejemplo n.º 4
0
class KloudBuster(object):
    """
    Creates resources on the cloud for loading up the cloud
    1. Tenants
    2. Users per tenant
    3. Routers per user
    4. Networks per router
    5. Instances per network
    """
    def __init__(self,
                 server_cred,
                 client_cred,
                 server_cfg,
                 client_cfg,
                 topology,
                 tenants_list,
                 storage_mode=False,
                 multicast_mode=False):
        # List of tenant objects to keep track of all tenants
        self.server_cred = server_cred
        self.client_cred = client_cred
        self.server_cfg = server_cfg
        self.client_cfg = client_cfg
        self.storage_mode = storage_mode
        self.multicast_mode = multicast_mode

        if topology and tenants_list:
            self.topology = None
            LOG.warning("REUSING MODE: Topology configs will be ignored.")
        else:
            self.topology = topology
        if tenants_list:
            self.tenants_list = {}
            self.tenants_list['server'] = \
                [{'name': tenants_list['tenant_name'], 'user': tenants_list['server_user']}]
            self.tenants_list['client'] = \
                [{'name': tenants_list['tenant_name'], 'user': tenants_list['client_user']}]
            LOG.warning(
                "REUSING MODE: The quotas will not be adjusted automatically.")
            LOG.warning("REUSING MODE: The flavor configs will be ignored.")
        else:
            self.tenants_list = {'server': None, 'client': None}
        # TODO(check on same auth_url instead)
        self.single_cloud = False if client_cred else True
        if not client_cred:
            self.client_cred = server_cred
        # Automatically enable the floating IP for server cloud under dual-cloud mode
        if not self.single_cloud and not self.server_cfg['use_floatingip']:
            self.server_cfg['use_floatingip'] = True
            LOG.info(
                'Automatically setting "use_floatingip" to True for server cloud...'
            )

        self.kb_proxy = None
        self.final_result = {}
        self.server_vm_create_thread = None
        self.client_vm_create_thread = None
        self.kb_runner = None
        self.fp_logfile = None
        self.kloud = None
        self.testing_kloud = None
        self.server_vm_img = None
        self.client_vm_img = None

    def get_hypervisor_list(self, cred):
        """Return the hostnames of all enabled hypervisors of a cloud.

        :param cred: credential object providing get_session()
        """
        nova_client = novaclient('2',
                                 endpoint_type='publicURL',
                                 http_log_debug=True,
                                 session=cred.get_session())
        return [vars(hyp)['hypervisor_hostname']
                for hyp in nova_client.hypervisors.list()
                if vars(hyp)['status'] == 'enabled']

    def get_az_list(self, cred):
        """Return the names of all available AZs, excluding 'internal'.

        :param cred: credential object providing get_session()
        """
        nova_client = novaclient('2',
                                 endpoint_type='publicURL',
                                 http_log_debug=True,
                                 session=cred.get_session())
        ret_list = []
        for az in nova_client.availability_zones.list():
            info = vars(az)
            if info['zoneName'] != 'internal' and \
                    info['zoneState']['available']:
                ret_list.append(info['zoneName'])

        return ret_list

    def check_and_upload_image(self, kloud_name, image_name, image_url, sess,
                               retry_count):
        '''Check a VM image and upload it if not found

        :param kloud_name: display name of the kloud (for log messages only)
        :param image_name: glance image name to search for
        :param image_url: local pathname of the qcow2 image to upload
        :param sess: keystone session for the target cloud
        :param retry_count: max number of 2-second polls while waiting for
                            the uploaded image to become 'active'
        :return: the glance image on success, None on any failure
        '''
        glance_client = glanceclient.Client('2', session=sess)
        try:
            # Search for the image
            img = glance_client.images.list(filters={
                'name': image_name
            }).next()
            # image found
            return img
        except StopIteration:
            sys.exc_clear()

        # Trying to upload image
        LOG.info(
            "KloudBuster VM Image is not found in %s, trying to upload it..." %
            kloud_name)
        if not image_url:
            LOG.error(
                'Configuration file is missing a VM image pathname (vm_image_name)'
            )
            return None
        retry = 0
        try:
            LOG.info("Uploading VM Image from %s..." % image_url)
            with open(image_url) as f_image:
                img = glance_client.images.create(name=image_name,
                                                  disk_format="qcow2",
                                                  container_format="bare",
                                                  visibility="public")
                glance_client.images.upload(img.id, image_data=f_image)
            # Check for the image in glance: poll until active or timeout
            while img.status in ['queued', 'saving'] and retry < retry_count:
                img = glance_client.images.get(img.id)
                retry += 1
                LOG.debug("Image not yet active, retrying %s of %s...", retry,
                          retry_count)
                time.sleep(2)
            if img.status != 'active':
                LOG.error("Image uploaded but too long to get to active state")
                raise Exception("Image update active state timeout")
        except glance_exception.HTTPForbidden:
            LOG.error(
                "Cannot upload image without admin access. Please make "
                "sure the image is uploaded and is either public or owned by you."
            )
            return None
        except IOError as exc:
            # catch the exception for file based errors.
            LOG.error(
                "Failed while uploading the image. Please make sure the "
                "image at the specified location %s is correct: %s", image_url,
                str(exc))
            return None
        except keystoneauth1.exceptions.http.NotFound as exc:
            LOG.error("Authentication error while uploading the image: " +
                      str(exc))
            return None
        except Exception as exc:
            # Bug fix: the handler previously read 'exc' without binding it
            # ('except Exception:'), which raised a NameError here instead of
            # logging the actual failure
            LOG.error(traceback.format_exc())
            LOG.error("Failed while uploading the image: %s", str(exc))
            return None
        return img

    def check_and_upload_images(self, retry_count=150):
        image_name = self.client_cfg.image_name
        image_url = self.client_cfg.vm_image_file
        self.server_vm_img = self.check_and_upload_image(
            'Server kloud', image_name, image_url,
            self.server_cred.get_session(), retry_count)
        if self.server_vm_img is None:
            return False
        if self.client_cred == self.server_cred:
            self.client_vm_img = self.server_vm_img
        else:
            self.client_vm_img = self.check_and_upload_image(
                'Client kloud', image_name, image_url,
                self.client_cred.get_session(), retry_count)
        return self.client_vm_img is not None

    def print_provision_info(self):
        """
        Log a per-VM provisioning table for the tested and testing clouds
        (tested side is skipped in storage mode, which has no server kloud).
        """
        if not self.storage_mode:
            rows = [[ins.vm_name, ins.host, ins.fixed_ip, ins.fip_ip,
                     ins.subnet_ip, ins.shared_interface_ip]
                    for ins in self.kloud.get_all_instances()]
            table = [[
                "VM Name", "Host", "Internal IP", "Floating IP", "Subnet",
                "Shared Interface IP"
            ]] + rows
            LOG.info('Provision Details (Tested Kloud)\n' +
                     tabulate(table, headers="firstrow", tablefmt="psql"))

        # The testing side includes the KB-Proxy VM
        rows = [[ins.vm_name, ins.host, ins.fixed_ip, ins.fip_ip,
                 ins.subnet_ip]
                for ins in self.testing_kloud.get_all_instances(
                    include_kb_proxy=True)]
        table = [["VM Name", "Host", "Internal IP", "Floating IP",
                  "Subnet"]] + rows
        LOG.info('Provision Details (Testing Kloud)\n' +
                 tabulate(table, headers="firstrow", tablefmt="psql"))

    def gen_server_user_data(self, test_mode):
        """Fill in boot metadata (user_data, flavor) for all server-side VMs.

        :param test_mode: 'http' or 'multicast'
        """
        LOG.info("Preparing metadata for VMs... (Server)")
        server_list = self.kloud.get_all_instances()
        KBScheduler.setup_vm_placement('Server', server_list, self.topology,
                                       self.kloud.placement_az, "Round-robin")
        if test_mode == 'http':
            for srv in server_list:
                srv.user_data['role'] = 'HTTP_Server'
                srv.user_data['http_server_configs'] = \
                    srv.config['http_server_configs']
                srv.boot_info['flavor_type'] = FLAVOR_KB_SERVER
                srv.boot_info['user_data'] = str(srv.user_data)
        elif test_mode == 'multicast':
            # Nuttcp tests over first /25
            # Multicast Listeners over second /25
            mcast_cfg = self.client_cfg['multicast_tool_configs']
            start_addr = mcast_cfg['multicast_address_start']
            # Listener range starts at the .128 host of the same subnet
            octets = start_addr.split(".")
            octets[-1] = "128"
            listener_start = ".".join(octets)
            clocks = " ".join(mcast_cfg['ntp_clocks'])
            for srv_id, srv in enumerate(server_list):
                srv.user_data['role'] = 'Multicast_Server'
                srv.user_data['n_id'] = srv_id
                srv.user_data['multicast_server_configs'] = mcast_cfg
                srv.user_data['multicast_addresses'] = \
                    mcast_cfg['addresses'][-1]
                srv.user_data['multicast_ports'] = mcast_cfg['ports'][-1]
                srv.user_data['multicast_start_address'] = start_addr
                srv.user_data[
                    'multicast_listener_address_start'] = listener_start
                srv.user_data['ntp_clocks'] = clocks
                srv.user_data[
                    'pktsizes'] = self.client_cfg.multicast_tool_configs.pktsizes
                srv.boot_info['flavor_type'] = FLAVOR_KB_SERVER
                srv.boot_info['user_data'] = str(srv.user_data)

    def gen_client_user_data(self, test_mode):
        """Fill in boot metadata (user_data, flavor) for all client-side VMs.

        :param test_mode: 'http', 'multicast' or 'storage'; HTTP/multicast
                          clients are mapped to server VMs, storage clients
                          only need the redis proxy coordinates.
        """
        LOG.info("Preparing metadata for VMs... (Client)")
        client_list = self.testing_kloud.get_all_instances()
        KBScheduler.setup_vm_placement('Client', client_list, self.topology,
                                       self.testing_kloud.placement_az,
                                       "Round-robin")
        if test_mode != 'storage':
            role = 'HTTP_Client' if test_mode == 'http' else 'Multicast_Client'
            algo = '1:1' if test_mode == 'http' else '1:n'
            server_list = self.kloud.get_all_instances()
            KBScheduler.setup_vm_mappings(client_list, server_list, algo)
            # Bug fix: only multicast clients need the NTP clock list; the
            # original read multicast_tool_configs unconditionally, which
            # raised KeyError for HTTP runs whose config has no multicast
            # section.
            clocks = None
            if role == 'Multicast_Client':
                clocks = " ".join(
                    self.client_cfg['multicast_tool_configs']['ntp_clocks'])
            for idx, ins in enumerate(client_list):
                ins.user_data['role'] = role
                ins.user_data['vm_name'] = ins.vm_name
                ins.user_data['redis_server'] = self.kb_proxy.fixed_ip
                ins.user_data['redis_server_port'] = 6379
                # Each client targets the server VM it was mapped to
                ins.user_data['target_subnet_ip'] = server_list[idx].subnet_ip
                ins.user_data['target_shared_interface_ip'] = server_list[
                    idx].shared_interface_ip
                if role == 'Multicast_Client':
                    ins.user_data['ntp_clocks'] = clocks
                ins.boot_info['flavor_type'] = FLAVOR_KB_CLIENT
                ins.boot_info['user_data'] = str(ins.user_data)
        else:
            for ins in client_list:
                ins.user_data['role'] = 'Storage_Client'
                ins.user_data['vm_name'] = ins.vm_name
                ins.user_data['redis_server'] = self.kb_proxy.fixed_ip
                ins.user_data['redis_server_port'] = 6379
                ins.boot_info['flavor_type'] = FLAVOR_KB_CLIENT
                ins.boot_info['user_data'] = str(ins.user_data)

    def gen_metadata(self):
        """Reset self.final_result with run metadata and an empty result list."""
        if self.multicast_mode:
            mode = 'multicast'
        elif self.storage_mode:
            mode = 'storage'
        else:
            mode = 'http'
        self.final_result = {
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'test_mode': mode,
            'version': __version__,
            'kb_result': [],
        }
        if self.storage_mode:
            self.final_result['storage_target'] = \
                self.client_cfg.storage_stage_configs.target

    def run(self):
        """Stage all resources then execute the test, with layered error
        handling.

        Exception policy: KloudBuster errors are logged; a provider-network
        lookup failure aborts quietly (reported where it was raised);
        anything else gets a full traceback; Ctrl-C logs a termination
        notice.  Cleanup runs only when BOTH sides are configured to clean
        up their resources.
        """
        try:
            self.stage()
            self.run_test()
        except KBException as e:
            LOG.error(e.message)
        except base_network.KBGetProvNetException:
            pass
        except Exception:
            traceback.print_exc()
        except KeyboardInterrupt:
            # Reachable: KeyboardInterrupt is not a subclass of Exception,
            # so the generic handler above does not swallow it
            LOG.info('Terminating KloudBuster...')
        finally:
            if self.server_cfg['cleanup_resources'] and self.client_cfg[
                    'cleanup_resources']:
                self.cleanup()

    def stage(self):
        """
        Staging all resources for running KloudBuster Tests

        Order of operations (the server-side kloud is skipped entirely in
        storage mode):
          1. Compute tenant quotas and create tenants/users/routers/networks
             on both klouds.
          2. Reserve the last client instance as the KB-PROXY VM and boot it.
          3. Instantiate the runner matching the test mode and connect it to
             the proxy's redis server.
          4. Generate per-VM user_data and boot all VMs, concurrently when
             the two klouds are distinct clouds.

        Re-raises (with the original traceback) any exception recorded by a
        VM-creation thread.
        """
        vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
        tenant_quota = self.calc_tenant_quota()
        if not self.storage_mode:
            # Tested-side kloud (HTTP/multicast servers live here)
            self.kloud = Kloud(self.server_cfg,
                               self.server_cred,
                               self.tenants_list['server'],
                               self.server_vm_img,
                               storage_mode=self.storage_mode,
                               multicast_mode=self.multicast_mode)
            # VM creation is deferred to a daemon thread so both klouds can
            # boot their VMs concurrently in dual-cloud mode
            self.server_vm_create_thread = threading.Thread(
                target=self.kloud.create_vms, args=[vm_creation_concurrency])
            self.server_vm_create_thread.daemon = True
        self.testing_kloud = Kloud(self.client_cfg,
                                   self.client_cred,
                                   self.tenants_list['client'],
                                   self.client_vm_img,
                                   testing_side=True,
                                   storage_mode=self.storage_mode,
                                   multicast_mode=self.multicast_mode)
        self.client_vm_create_thread = threading.Thread(
            target=self.testing_kloud.create_vms,
            args=[vm_creation_concurrency])
        self.client_vm_create_thread.daemon = True

        if not self.storage_mode:
            self.kloud.create_resources(tenant_quota['server'])
        self.testing_kloud.create_resources(tenant_quota['client'])

        # Setting up the KloudBuster Proxy node: the last instance of the
        # client kloud is reserved for the proxy and removed from the list
        # handed to the runner
        client_list = self.testing_kloud.get_all_instances()
        self.kb_proxy = client_list[-1]
        client_list.pop()

        self.kb_proxy.vm_name = 'KB-PROXY'
        self.kb_proxy.user_data['role'] = 'KB-PROXY'
        self.kb_proxy.boot_info['flavor_type'] = FLAVOR_KB_PROXY
        if self.topology:
            # Pin the proxy on the first rack that hosts client VMs
            proxy_hyper = self.topology.clients_rack[0]
            self.kb_proxy.boot_info['avail_zone'] = \
                "%s:%s" % (self.testing_kloud.placement_az, proxy_hyper) \
                if self.testing_kloud.placement_az else "nova:%s" % (proxy_hyper)

        self.kb_proxy.boot_info['user_data'] = str(self.kb_proxy.user_data)
        self.testing_kloud.create_vm(self.kb_proxy)
        # Pick the runner implementation matching the test mode
        if self.storage_mode:
            self.kb_runner = KBRunner_Storage(client_list, self.client_cfg)
        elif self.multicast_mode:
            self.kb_runner = KBRunner_Multicast(client_list, self.client_cfg,
                                                self.single_cloud)

        else:
            self.kb_runner = KBRunner_HTTP(client_list, self.client_cfg,
                                           self.single_cloud)

        # Reach the proxy's redis through its floating IP when there is one
        self.kb_runner.setup_redis(self.kb_proxy.fip_ip
                                   or self.kb_proxy.fixed_ip)
        if self.client_cfg.progression['enabled'] and not self.multicast_mode:
            # Pre-compute and log the VM count of every progression stage
            log_info = "Progression run is enabled, KloudBuster will schedule " \
                       "multiple runs as listed:"
            stage = 1
            start = self.client_cfg.progression.vm_start
            multiple = self.client_cfg.progression.vm_multiple
            cur_vm_count = 1 if start else multiple
            # Minus 1 for KB-Proxy
            total_vm = self.get_tenant_vm_count(self.client_cfg) - 1
            while (cur_vm_count <= total_vm):
                log_info += "\n" + self.kb_runner.header_formatter(
                    stage, cur_vm_count)
                cur_vm_count = (stage + 1 - start) * multiple
                stage += 1
            LOG.info(log_info)

        if self.single_cloud and not self.storage_mode and not self.multicast_mode:
            # Find the shared network if the cloud used to testing is same
            # Attach the router in tested kloud to the shared network
            shared_net = self.testing_kloud.get_first_network()
            self.kloud.attach_to_shared_net(shared_net)

        # Create VMs in both tested and testing kloud concurrently
        user_data_mode = "multicast" if self.multicast_mode else "http"
        if self.storage_mode:
            self.gen_client_user_data("storage")
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        elif self.single_cloud:
            # Same cloud: server VMs must exist before clients are mapped
            self.gen_server_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.gen_client_user_data(user_data_mode)
            self.client_vm_create_thread.start()
            self.client_vm_create_thread.join()
        else:
            # Two distinct clouds: both sides can boot in parallel
            self.gen_server_user_data(user_data_mode)
            self.gen_client_user_data(user_data_mode)
            self.server_vm_create_thread.start()
            self.client_vm_create_thread.start()
            self.server_vm_create_thread.join()
            self.client_vm_create_thread.join()

        # Surface any failure recorded by a creation thread, preserving its
        # original traceback (Python 2 three-expression raise)
        if self.testing_kloud and self.testing_kloud.exc_info:
            raise self.testing_kloud.exc_info[
                1], None, self.testing_kloud.exc_info[2]

        if self.kloud and self.kloud.exc_info:
            raise self.kloud.exc_info[1], None, self.kloud.exc_info[2]

        # Function that print all the provisioning info
        self.print_provision_info()

    def run_test(self, test_only=False):
        """Execute the benchmark through the runner and collect results.

        :param test_only: forwarded to the runner (re-run on staged VMs)
        """
        self.gen_metadata()
        self.kb_runner.config = self.client_cfg
        # Run the runner to perform benchmarkings
        for _ in self.kb_runner.run(test_only):
            # Multicast keeps a single aggregated entry; other modes append
            # one entry per progression stage
            if not self.multicast_mode or not self.final_result['kb_result']:
                self.final_result['kb_result'].append(
                    self.kb_runner.tool_result)
        LOG.info('SUMMARY: %s' % self.final_result)

    def stop_test(self):
        """Ask the runner to abort the in-flight test and log the request."""
        self.kb_runner.stop()
        LOG.info('Testing is stopped by request.')

    def cleanup(self):
        # Stop the runner, shutdown the redis thread
        if self.kb_runner:
            try:
                self.kb_runner.dispose()
            except Exception:
                pass

        # Cleanup: start with tested side first
        # then testing side last (order is important because of the shared network)
        cleanup_flag = False
        try:
            cleanup_flag = self.kloud.delete_resources(
            ) if not self.storage_mode else True
        except Exception:
            traceback.print_exc()
        if not cleanup_flag:
            LOG.warning(
                'Some resources in server cloud are not cleaned up properly.')
            KBResLogger.dump_and_save('svr',
                                      self.kloud.res_logger.resource_list)

        cleanup_flag = False
        try:
            if self.testing_kloud:
                cleanup_flag = self.testing_kloud.delete_resources()
        except Exception:
            traceback.print_exc()
        if not cleanup_flag:
            LOG.warning(
                'Some resources in client cloud are not cleaned up properly.')
            KBResLogger.dump_and_save(
                'clt', self.testing_kloud.res_logger.resource_list)

        # Set the kloud to None
        self.kloud = None
        self.testing_kloud = None

    def dump_logs(self, offset=0):
        if not self.fp_logfile:
            return ""

        self.fp_logfile.seek(offset)
        return self.fp_logfile.read()

    def dispose(self):
        if self.fp_logfile:
            self.fp_logfile.close()
        logging.delete_logfile('kloudbuster', self.fp_logfile.name)
        self.fp_logfile = None

    def get_tenant_vm_count(self, config):
        """Total VMs for one tenant: routers x networks x VMs per network.

        Does not apply to storage mode, whose VM count comes from the
        storage stage config instead.
        """
        return (config['routers_per_tenant'] *
                config['networks_per_router'] *
                config['vms_per_network'])

    def calc_neutron_quota(self):
        """Compute per-tenant Neutron quotas for server and client clouds.

        Port/floating-IP accounting (per side):
          - each VM consumes one fixed-IP port, plus one floating-IP port
            when floating IPs are enabled
          - each network consumes two ports (router gateway + DHCP agent)
          - each router consumes one external port and, with floating IPs,
            one external IP
          - the client side adds two ports and one floating IP for KB-Proxy
          - in single-cloud mode every server router also attaches to the
            shared network and takes one port on the client side
        A safety margin of 10 ports is added on both sides.

        :return: [server_quota, client_quota] dicts.
        """
        vm_total = self.get_tenant_vm_count(self.server_cfg)
        net_total = self.server_cfg['routers_per_tenant'] * \
            self.server_cfg['networks_per_router']
        rtr_total = self.server_cfg['routers_per_tenant']
        server_quota = {
            'network': net_total,
            'subnet': net_total,
            'router': rtr_total,
            'security_group': net_total + 1,
            'security_group_rule': (net_total + 1) * 10,
        }
        if self.server_cfg['use_floatingip']:
            # One floating IP per VM plus one external IP per router
            server_quota['floatingip'] = vm_total + rtr_total
            # Fixed + floating port per VM, 2 ports per network
            # (gateway + DHCP), 1 external port per router, plus margin
            server_quota['port'] = \
                2 * vm_total + 2 * net_total + rtr_total + 10
        else:
            server_quota['floatingip'] = rtr_total
            server_quota['port'] = vm_total + 2 * net_total + rtr_total + 10

        # Client side always uses a single router/network/subnet
        vm_total = self.get_tenant_vm_count(self.client_cfg)
        client_quota = {
            'network': 1,
            'subnet': 1,
            'router': 1,
            'security_group': 2,
            'security_group_rule': 20,
        }
        if self.client_cfg['use_floatingip']:
            # One per VM, one router external IP, one for KB-Proxy
            client_quota['floatingip'] = vm_total + 1 + 1
            # 2 ports per VM, 2 for the network (gateway + DHCP), 2 for
            # KB-Proxy (fixed + floating), 1 router external port, + margin
            client_quota['port'] = 2 * vm_total + 2 * 1 + 2 + 1 + 10
        else:
            client_quota['floatingip'] = 1 + 1
            client_quota['port'] = vm_total + 2 * 1 + 2 + 1
        if self.single_cloud:
            # One shared-network port per server-cloud router, plus margin
            client_quota['port'] += server_quota['router'] + 10

        return [server_quota, client_quota]

    def calc_nova_quota(self):
        """Compute per-tenant Nova quotas (instances, cores, ram).

        Storage mode has no server kloud, so the server quota stays empty
        and the VM count comes from the storage stage config.  The client
        side always reserves one extra instance/vcpu and 2GB of RAM for
        the KB-Proxy VM.

        :return: [server_quota, client_quota] dicts.
        """
        server_quota = {}
        client_quota = {}
        if self.storage_mode:
            # in case of storage, the number of VMs is to be taken from the
            # the storage config
            vm_count = self.client_cfg['storage_stage_configs']['vm_count']
        else:
            vm_count = self.get_tenant_vm_count(self.server_cfg)
            server_quota['instances'] = vm_count
            server_quota['cores'] = \
                vm_count * self.server_cfg['flavor']['vcpus']
            server_quota['ram'] = vm_count * self.server_cfg['flavor']['ram']
            LOG.info(
                'Server tenant Nova quotas: instances=%d vcpus=%d ram=%dMB',
                server_quota['instances'], server_quota['cores'],
                server_quota['ram'])
            vm_count = self.get_tenant_vm_count(self.client_cfg)

        # add 1 for the proxy
        client_quota['instances'] = vm_count + 1
        client_quota['cores'] = \
            vm_count * self.client_cfg['flavor']['vcpus'] + 1
        client_quota['ram'] = \
            vm_count * self.client_cfg['flavor']['ram'] + 2048
        LOG.info('Client tenant Nova quotas: instances=%d vcpus=%d ram=%dMB',
                 client_quota['instances'], client_quota['cores'],
                 client_quota['ram'])
        return [server_quota, client_quota]

    def calc_cinder_quota(self):
        # Cinder quotas are only set for storage mode
        # Since storage mode only uses client tenant
        # Server tenant cinder quota is only used for non-storage case
        # we can leave the server quota empty
        server_quota = {}

        # Client tenant quota is based on the number of
        # storage VMs and disk size per VM
        # (note this is not the flavor disk size!)
        client_quota = {}
        if self.storage_mode:
            storage_cfg = self.client_cfg['storage_stage_configs']
            vm_count = storage_cfg['vm_count']
            client_quota['gigabytes'] = vm_count * storage_cfg['disk_size']
            client_quota['volumes'] = vm_count
            LOG.info('Cinder quotas: volumes=%d storage=%dGB', vm_count,
                     client_quota['gigabytes'])
        return [server_quota, client_quota]

    def calc_tenant_quota(self):
        quota_dict = {'server': {}, 'client': {}}
        nova_quota = self.calc_nova_quota()
        neutron_quota = self.calc_neutron_quota()
        cinder_quota = self.calc_cinder_quota()
        for idx, val in enumerate(['server', 'client']):
            quota_dict[val]['nova'] = nova_quota[idx]
            quota_dict[val]['neutron'] = neutron_quota[idx]
            quota_dict[val]['cinder'] = cinder_quota[idx]

        return quota_dict