Example #1
    def _create_multi_ucsm_dict(self):
        """Creates a dictionary of all UCS Manager data from config."""
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)

        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(_('Some config files were not parsed properly'))

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                dev_id, sep, dev_ip = parsed_item.partition(':')
                if dev_id.lower() == 'ml2_cisco_ucsm_ip':
                    ucsm_info = []
                    for dev_key, value in parsed_file[parsed_item].items():
                        ucsm_info.append(value[0])
                    self.ucsm_dict[dev_ip] = ucsm_info
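The section names this parser walks have the form <dev_id>:<dev_ip>. A minimal sketch of that split, with an illustrative section name (the IP is an assumption, not taken from any real config):

# Hedged sketch: splitting a 'ml2_cisco_ucsm_ip:<ip>' section name.
section = 'ml2_cisco_ucsm_ip:10.10.10.10'  # illustrative value
dev_id, sep, dev_ip = section.partition(':')
assert (dev_id, dev_ip) == ('ml2_cisco_ucsm_ip', '10.10.10.10')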
Example #2
    def __call__(self, value):
        if isinstance(value, dict):
            return value

        templates = {}
        template_mappings = (value or "").split()

        for mapping in template_mappings:
            data = mapping.split(":")
            if len(data) != 3:
                raise cfg.Error(
                    _("UCS Mech Driver: Invalid VNIC Template "
                      "config: %s") % mapping)
            data[1] = data[1] or const.VNIC_TEMPLATE_PARENT_DN
            templates[data[0]] = UCSTemplate(data[1], data[2])
        return templates
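Each whitespace-separated mapping carries three colon-separated fields, and an empty middle field falls back to the parent DN constant. A minimal sketch with illustrative values (the physnet, template name, and fallback DN are assumptions):

# Hedged sketch of the '<physnet>:<parent_dn>:<template>' format parsed above.
mapping = 'physnet1::Template1'   # illustrative value
data = mapping.split(':')         # ['physnet1', '', 'Template1']
data[1] = data[1] or 'org-root'   # stand-in for const.VNIC_TEMPLATE_PARENT_DN
# templates['physnet1'] = UCSTemplate('org-root', 'Template1')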
Example #3
def parse_ucsm_host_config():
    sp_dict = {}
    host_dict = {}
    if cfg.CONF.ml2_cisco_ucsm.ucsm_host_list:
        host_config_list = cfg.CONF.ml2_cisco_ucsm.ucsm_host_list
        for host in host_config_list:
            hostname, sep, service_profile = host.partition(':')
            if not sep or not service_profile:
                raise cfg.Error(
                    _("UCS Mech Driver: Invalid Host Service "
                      "Profile config: %s") % host)
            key = (cfg.CONF.ml2_cisco_ucsm.ucsm_ip, hostname)
            sp_dict[key] = (const.SERVICE_PROFILE_PATH_PREFIX +
                            service_profile.strip())
            host_dict[hostname] = cfg.CONF.ml2_cisco_ucsm.ucsm_ip
    return sp_dict, host_dict
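Each ucsm_host_list entry is expected to be '<hostname>:<service_profile>'; a missing separator or empty profile raises the cfg.Error above. A minimal sketch with illustrative names:

# Hedged sketch of a valid host entry; the names are illustrative only.
host = 'compute-1:SP-compute-1'
hostname, sep, service_profile = host.partition(':')
assert sep and service_profile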
Example #4
    def _create_ml2_mech_device_cisco_dictionary(self):
        """Create the ML2 device cisco dictionary.

        Read data from the ml2_conf_cisco.ini device supported sections.
        """
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)

        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(_("Some config files were not parsed properly"))

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                dev_id, sep, dev_ip = parsed_item.partition(':')
                if dev_id.lower() == 'ml2_mech_cisco_nexus':
                    for dev_key, value in parsed_file[parsed_item].items():
                        self.nexus_dict[dev_ip, dev_key] = value[0]
Example #5
    def __call__(self, value):
        if isinstance(value, dict):
            return value

        templates = {}
        template_mappings = (value or "").split()

        for mapping in template_mappings:
            data = mapping.split(":")
            if len(data) != 3:
                raise cfg.Error(
                    _('UCS Mech Driver: Invalid Service '
                      'Profile Template config %s') % mapping)
            host_list = data[2].split(',')
            for host in host_list:
                templates[host] = UCSTemplate(data[0], data[1])
        return templates
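Here the three fields are '<path>:<template>:<host1>,<host2>,...', and every listed host maps to the same UCSTemplate. A minimal sketch with illustrative values:

# Hedged sketch of the SP template mapping format; values are illustrative.
mapping = 'org-root:SP-Template1:compute-1,compute-2'
path, template, hosts = mapping.split(':')
# templates['compute-1'] and templates['compute-2'] both become
# UCSTemplate('org-root', 'SP-Template1')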
Example #6
    def _create_multi_ucsm_dicts(self):
        """Creates a dictionary of all UCS Manager data from config."""
        username = None
        password = None
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)

        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(_('Some config files were not parsed properly'))

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                dev_id, sep, dev_ip = parsed_item.partition(':')
                dev_ip = dev_ip.strip()
                if dev_id.lower() == 'ml2_cisco_ucsm_ip':
                    eth_port_list = []
                    for dev_key, value in parsed_file[parsed_item].items():
                        config_item = dev_key.lower()
                        if config_item == 'ucsm_virtio_eth_ports':
                            for eth_port in value[0].split(','):
                                eth_port_list.append(
                                    const.ETH_PREFIX + str(eth_port).strip())
                            self.ucsm_port_dict[dev_ip] = eth_port_list
                        elif config_item == 'sp_template_list':
                            self._parse_sp_template_list(dev_ip, value)
                            self.sp_template_mode = True
                        elif config_item == 'vnic_template_list':
                            self._parse_vnic_template_list(dev_ip, value)
                            self.vnic_template_mode = True
                        elif config_item == 'sriov_qos_policy':
                            LOG.debug('QoS Policy: %s', value[0].strip())
                            self.sriov_qos_policy[dev_ip] = value[0].strip()
                        elif config_item == 'ucsm_username':
                            username = value[0].strip()
                        elif config_item == 'ucsm_password':
                            password = value[0].strip()
                        ucsm_info = (username, password)
                        self.ucsm_dict[dev_ip] = ucsm_info
                        self.multi_ucsm_mode = True
                if dev_id.lower() == 'sriov_multivlan_trunk':
                    for dev_key, value in parsed_file[parsed_item].items():
                        self._parse_sriov_multivlan_trunk_config(dev_key,
                                                                 value)
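For reference, a hedged sketch of the per-UCSM section this parser consumes; the option names come from the branches above, while the concrete values are illustrative assumptions:

# [ml2_cisco_ucsm_ip:10.10.10.10]        (illustrative section name)
# ucsm_username = admin                  (illustrative)
# ucsm_password = secret                 (illustrative)
# ucsm_virtio_eth_ports = eth0, eth1     (illustrative)
# Each virtio port is stored with const.ETH_PREFIX prepended, and the
# (username, password) pair becomes ucsm_dict['10.10.10.10'].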
Example #7
 def _vnic_template_data_for_ucsm_ip(self, ucsm_ip):
     if ucsm_ip == CONF.ml2_cisco_ucsm.ucsm_ip:
         template_list = CONF.ml2_cisco_ucsm.vnic_template_list
     elif ucsm_ip in CONF.ml2_cisco_ucsm.ucsms:
         template_list = (
             CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].vnic_template_list)
     else:
         return []
     mappings = []
     vnic_template_mappings = template_list.split()
     for mapping in vnic_template_mappings:
         data = mapping.split(":")
         if len(data) != 3:
             raise cfg.Error(
                 _("UCS Mech Driver: Invalid VNIC Template "
                   "config: %s") % mapping)
         data[1] = data[1] or const.VNIC_TEMPLATE_PARENT_DN
         mappings.append(data)
     return mappings
Example #8
 def __init__(self):
     auth_url = cfg.CONF.keystone_authtoken.auth_uri
     auth_user = cfg.CONF.keystone_authtoken.admin_user
     auth_password = cfg.CONF.keystone_authtoken.admin_password
     auth_tenant = cfg.CONF.keystone_authtoken.admin_tenant_name
     self.version = self._get_api_version(auth_url)
     if self.version == 'v2':
         LOG.debug("auth_url=%(auth_url)s, auth_user=%(auth_user)s, "
                   "auth_tenant=%(auth_tenant)s",
                   {'auth_url': auth_url, 'auth_user': auth_user,
                    'auth_tenant': auth_tenant})
         self.keystone_client = clientv2.Client(
             auth_url=auth_url,
             username=auth_user,
             password=auth_password,
             tenant_name=auth_tenant,
         )
     elif self.version == 'v3':
         raise cfg.Error('keystone api v3 is not supported yet')
Example #9
 def _all_sp_templates(self):
     sp_templates = {}
     ucsms = dict(CONF.ml2_cisco_ucsm.ucsms)
     if (CONF.ml2_cisco_ucsm.ucsm_ip
             and CONF.ml2_cisco_ucsm.sp_template_list):
         ucsms[CONF.ml2_cisco_ucsm.ucsm_ip] = {
             'sp_template_list': CONF.ml2_cisco_ucsm.sp_template_list,
         }
     for ip, ucsm in ucsms.items():
         sp_template_mappings = (ucsm.get('sp_template_list') or "").split()
         for mapping in sp_template_mappings:
             data = mapping.split(":")
             if len(data) != 3:
                 raise cfg.Error(
                     _('UCS Mech Driver: Invalid Service '
                       'Profile Template config %s') % mapping)
             host_list = data[2].split(',')
             for host in host_list:
                 sp_templates[host] = (ip, data[0], data[1])
     return sp_templates
Example #10
 def _parse_sp_template_list(self, ucsm_ip, sp_template_config):
     sp_template_list = []
     for sp_template_temp in sp_template_config:
         sp_template_list = sp_template_temp.split()
         for sp_template in sp_template_list:
             sp_template_path, sep, template_hosts = (
                 sp_template.partition(':'))
             if not sp_template_path or not sep or not template_hosts:
                 raise cfg.Error(_('UCS Mech Driver: Invalid Service '
                                   'Profile Template config %s')
                                 % sp_template_config)
             sp_temp, sep, hosts = template_hosts.partition(':')
             LOG.debug('SP Template Path: %s, SP Template: %s, '
                       'Hosts: %s', sp_template_path, sp_temp, hosts)
             host_list = hosts.split(',')
             for host in host_list:
                 value = (ucsm_ip, sp_template_path, sp_temp)
                 self.sp_template_dict[host] = value
                 LOG.debug('SP Template Dict key: %s, value: %s',
                           host, value)
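The two partition(':') calls peel the path and the template off a '<path>:<template>:<host1>,<host2>' entry. A minimal sketch with illustrative values:

# Hedged sketch of the two-step partition; values are illustrative only.
sp_template = 'org-root:SP-Template1:compute-1,compute-2'
sp_template_path, sep, template_hosts = sp_template.partition(':')
sp_temp, sep, hosts = template_hosts.partition(':')
assert (sp_template_path, sp_temp) == ('org-root', 'SP-Template1')
assert hosts.split(',') == ['compute-1', 'compute-2']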
Example #11
    def _fetch_and_store_cert(self, server, port, path):
        """_fetch_and_store_cert

        Grabs a certificate from a server and writes it to
        a given path.
        """
        try:
            cert = ssl.get_server_certificate((server, port),
                                              ssl_version=ssl.PROTOCOL_SSLv23)
        except Exception as e:
            raise cfg.Error(_('Could not retrieve initial '
                              'certificate from controller %(server)s. '
                              'Error details: %(error)s') %
                            {'server': server, 'error': e})

        LOG.warning("Storing to certificate for host %(server)s "
                    "at %(path)s", {'server': server, 'path': path})
        self._file_put_contents(path, cert)

        return cert
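ssl.get_server_certificate returns the peer's certificate as a PEM-encoded string, which is why the helper can write it straight to disk. A standalone sketch under that assumption (the endpoint and path are illustrative):

# Hedged sketch: fetch a PEM certificate and persist it.
import ssl

pem = ssl.get_server_certificate(('203.0.113.10', 443))  # illustrative endpoint
with open('/tmp/controller.pem', 'w') as f:              # illustrative path
    f.write(pem)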
Example #12
def main():
    logging.register_options(cfg.CONF)
    _prepare_service(sys.argv)
    gmr.TextGuruMeditation.setup_autorun(version)
    sm = status_manager.StatusManager()
    signal.signal(signal.SIGHUP, _mutate_config)

    # Intervals below 60 seconds would let a new health check start before
    # the previous one has finished, driving CPU utilization too high.
    min_health_check_interval = 60
    if CONF.status_manager.health_check_interval < min_health_check_interval:
        raise cfg.Error(
            "To prevent device overload, the health check interval "
            "must not be lower than {}".format(min_health_check_interval))

    @periodics.periodic(CONF.status_manager.health_check_interval,
                        run_immediately=True)
    def periodic_status():
        sm.heartbeat()

    status_check = periodics.PeriodicWorker(
        [(periodic_status, None, None)],
        schedule_strategy='aligned_last_finished')

    hm_status_thread = threading.Thread(target=status_check.start)
    hm_status_thread.daemon = True
    LOG.info("Status Manager process starts")
    hm_status_thread.start()

    def hm_exit(*args, **kwargs):
        status_check.stop()
        status_check.wait()
        sm.stats_executor.shutdown()
        sm.health_executor.shutdown()
        LOG.info("Status Manager executors terminated")

    signal.signal(signal.SIGINT, hm_exit)

    hm_status_thread.join()
    LOG.info("Status Manager terminated")
Example #13
    def __init__(self, **kwargs):
        """Initialize a new client for the plugin."""
        self.format = 'json'

        # Extract configuration parameters from the configuration file.
        self.n1kv_vsm_ips = cfg.CONF.ml2_cisco_n1kv.n1kv_vsm_ips
        self.username = cfg.CONF.ml2_cisco_n1kv.username
        self.password = cfg.CONF.ml2_cisco_n1kv.password
        self.action_prefix = 'http://%s/api/n1k'
        self.timeout = cfg.CONF.ml2_cisco_n1kv.http_timeout
        required_opts = ('n1kv_vsm_ips', 'username', 'password')
        # Validate whether required options are configured
        for opt in required_opts:
            if not getattr(self, opt):
                raise cfg.RequiredOptError(opt, 'ml2_cisco_n1kv')
        # Validate the configured VSM IP addresses
        # Note: Currently only support IPv4
        for vsm_ip in self.n1kv_vsm_ips:
            if not netaddr.valid_ipv4(vsm_ip):
                raise cfg.Error(
                    _("Cisco Nexus1000V ML2 driver config: "
                      "Invalid format for VSM IP address: %s") % vsm_ip)
Example #14
def get_specific_config(prefix):
    """Retrieve config based on the format [<prefix>:<value>].

    returns: a dict, {<UUID>: {<key1>:<value1>, <key2>:<value2>, ...}}
    """
    conf_dict = {}
    multi_parser = cfg.MultiConfigParser()
    read_ok = multi_parser.read(cfg.CONF.config_file)
    if len(read_ok) != len(cfg.CONF.config_file):
        raise cfg.Error(_("Some config files were not parsed properly"))

    for parsed_file in multi_parser.parsed:
        for parsed_item in parsed_file.keys():
            p_i = parsed_item.lower()
            if p_i.startswith(prefix):
                section_type, sep, uuid = p_i.partition(':')
                if section_type == prefix:
                    conf_dict[uuid] = {
                        k: v[0]
                        for (k, v) in parsed_file[parsed_item].items()
                    }
    return conf_dict
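Matching sections are expected to look like [<prefix>:<UUID>]. A minimal sketch of the name split, with an illustrative prefix and UUID:

# Hedged sketch; the prefix and UUID are illustrative only.
parsed_item = 'my_prefix:0f0f0f0f-0000-0000-0000-000000000001'
section_type, sep, uuid = parsed_item.lower().partition(':')
assert section_type == 'my_prefix'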
Example #15
    def _create_device_dictionary(self):
        """
        Create the device dictionary from the cisco_plugins.ini
        device supported sections. Ex. NEXUS_SWITCH, N1KV.
        """

        global first_device_ip

        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(CONF.config_file)

        if len(read_ok) != len(CONF.config_file):
            raise cfg.Error(_("Some config files were not parsed properly"))

        first_device_ip = None
        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                dev_id, sep, dev_ip = parsed_item.partition(':')
                if dev_id.lower() == 'n1kv':
                    for dev_key, value in parsed_file[parsed_item].items():
                        if dev_ip and not first_device_ip:
                            first_device_ip = dev_ip
                        device_dictionary[dev_id, dev_ip, dev_key] = value[0]
Example #16
 def __init__(self, host=None, port=None, use_ssl=True, auth=None,
              access_key_id=None, secret_access_key=None, **kwargs):
     load_workflow_list = kwargs.pop('load_workflow_list', True)
     if auth is None:
         auth = self._define_auth_function(access_key_id, secret_access_key)
     if not host or not port:
         if not cfgNWA.server_url:
             raise cfg.Error("'server_url' or (host, port) "
                             "must be specified.")
         host, port, use_ssl = self._parse_server_url(cfgNWA.server_url)
     super(NwaRestClient, self).__init__(host, port, use_ssl, auth,
                                         **kwargs)
     self._post_data = None
     self.workflow_first_wait = cfg.CONF.NWA.scenario_polling_first_timer
     self.workflow_wait_sleep = cfg.CONF.NWA.scenario_polling_timer
     self.workflow_retry_count = cfg.CONF.NWA.scenario_polling_count
     LOG.info(_LI('NWA init: workflow wait: %(first_wait)ss + '
                  '%(wait_sleep)ss x %(retry_count)s times.'),
              {'first_wait': self.workflow_first_wait,
               'wait_sleep': self.workflow_wait_sleep,
               'retry_count': self.workflow_retry_count})
     if load_workflow_list and not NwaRestClient.workflow_list_is_loaded:
         self.update_workflow_list()
         NwaRestClient.workflow_list_is_loaded = True
Example #17
    def force_topo_sync(self, check_ts=True):
        """Execute a topology_sync between OSP and BCF.

        Topology sync collects all data from Openstack and pushes to BCF in
        one single REST call. This is a heavy operation and is not executed
        automatically.

        Conditions when this would be called:
        (1) during ServerPool initialization
            (a) must check previous timestamp
        (2) if periodic keystone tenant_cache find a diff in tenant list
            (a) should not check previous timestamp
        (3) externally triggered by neutron force-bcf-sync command
            (a) should not check previous timestamp
        (4) a rest_call to BCF fails on both servers and failure_code is not
            part of the ignore_codes list
            (a) must check previous timestamp

        :param check_ts: boolean flag; when True, sync only if the previous
                         timestamp is older than TOPO_SYNC_EXPIRED_SECS
        :return: (sync_executed, response)
                 sync_executed - Boolean - True if we were able to acquire
                                 a lock and perform topo_sync, False otherwise
                 response - tuple of the typical HTTP response from REST call
                            (response.status, response.reason, respstr,
                            respdata)
        """
        LOG.info(_LI('TOPO_SYNC requested with check_ts %s'), check_ts)

        if not self.get_topo_function:
            raise cfg.Error(_('Server requires synchronization, '
                              'but no topology function was defined.'))

        # get current timestamp
        curr_ts = str(time.time())
        hash_handler = cdb.HashHandler(timestamp_ms=curr_ts)

        if not hash_handler.lock(check_ts):
            LOG.info(_LI("TOPO_SYNC: lock() returned False. Skipping."))
            return False, TOPO_RESPONSE_OK

        # else, perform topo_sync
        try:
            LOG.debug("TOPO_SYNC: requested at %(request_ts)s started at "
                      "%(start_ts)s",
                      {'request_ts': cdb.convert_ts_to_datetime(curr_ts),
                       'start_ts': cdb.convert_ts_to_datetime(time.time())})
            data = self.get_topo_function(
                **self.get_topo_function_args)
            if not data:
                # when keystone sync fails, it fails silently with data = None
                # that is wrong, we need to raise an exception
                raise Exception(_("TOPO_SYNC: failed to retrieve data."))
            LOG.debug("TOPO_SYNC: data received from OSP, sending "
                      "request to BCF.")
            errstr = _("Unable to perform forced topology_sync: %s")
            return True, self.rest_action('POST', TOPOLOGY_PATH, data, errstr)
        except Exception as e:
            # if encountered an exception, set to previous timestamp
            LOG.warning(_LW("TOPO_SYNC: Exception during topology sync. "
                            "Consistency DB timestamp will not be updated."))
            hash_handler.unlock(set_prev_ts=True)
            raise
        finally:
            hash_handler.unlock()
            diff = time.time() - float(hash_handler.lock_ts)
            LOG.info(_LI("TOPO_SYNC: took %s seconds to execute topo_sync. "
                         "consistency_db unlocked."),
                     str(diff))
Example #18
    def rest_call(self,
                  action,
                  resource,
                  data,
                  headers,
                  ignore_codes,
                  timeout=False):
        context = self.get_context_ref()
        if context:
            # include the requesting context information if available
            cdict = context.to_dict()
            # remove the auth token so it's not present in debug logs on the
            # backend controller
            cdict.pop('auth_token', None)
            headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
        hash_handler = cdb.HashHandler()
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            LOG.debug(
                "ServerProxy: %(action)s to servers: "
                "%(server)r, %(resource)s", {
                    'action': action,
                    'server': (active_server.server, active_server.port),
                    'resource': resource
                })
            for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
                ret = active_server.rest_call(action,
                                              resource,
                                              data,
                                              headers,
                                              timeout,
                                              reconnect=self.always_reconnect,
                                              hash_handler=hash_handler)
                if ret[0] != httplib.SERVICE_UNAVAILABLE:
                    break
                time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)

            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT:
                if not self.get_topo_function:
                    raise cfg.Error(
                        _('Server requires synchronization, '
                          'but no topology function was defined.'))

                LOG.info(
                    _LI("ServerProxy: HashConflict detected with request "
                        "%(action)s %(resource)s Starting Topology sync"), {
                            'action': action,
                            'resource': resource
                        })
                self._topo_sync_in_progress = True
                eventlet.spawn_n(self.keep_updating_lock)
                try:
                    data = self.get_topo_function(
                        **self.get_topo_function_args)
                    if data:
                        data = self._sanitize_data_for_topo_sync(data)
                        ret_ts = active_server.rest_call('POST',
                                                         TOPOLOGY_PATH,
                                                         data,
                                                         timeout=None)
                        if self.server_failure(ret_ts, ignore_codes):
                            LOG.error(_LE("ServerProxy: Topology sync failed"))
                            raise RemoteRestError(reason=ret_ts[2],
                                                  status=ret_ts[0])
                finally:
                    LOG.info(_LI("ServerProxy: Topology sync completed"))
                    self._topo_sync_in_progress = False
                    if data is None:
                        return None

            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                LOG.debug(
                    "ServerProxy: %(action)s succeeded for servers: "
                    "%(server)r Response: %(response)s", {
                        'action': action,
                        'server': (active_server.server, active_server.port),
                        'response': ret[3]
                    })
                return ret
            else:
                LOG.warning(
                    _LW('ServerProxy: %(action)s failure for servers:'
                        '%(server)r Response: %(response)s'), {
                            'action': action,
                            'server':
                            (active_server.server, active_server.port),
                            'response': ret[3]
                        })
                LOG.warning(
                    _LW("ServerProxy: Error details: "
                        "status=%(status)d, reason=%(reason)r, "
                        "ret=%(ret)s, data=%(data)r"), {
                            'status': ret[0],
                            'reason': ret[1],
                            'ret': ret[2],
                            'data': ret[3]
                        })
                active_server.failed = True

        # A failure on a delete means the object is gone from Neutron but not
        # from the controller. Set the consistency hash to a bad value to
        # trigger a sync on the next check.
        # NOTE: The hash must have a comma in it otherwise it will be ignored
        # by the backend.
        if action == 'DELETE':
            hash_handler.put_hash('INCONSISTENT,INCONSISTENT')
        # All servers failed, reset server list and try again next time
        LOG.error(
            _LE('ServerProxy: %(action)s failure for all servers: '
                '%(server)r'), {
                    'action': action,
                    'server': tuple((s.server, s.port) for s in self.servers)
                })
        return first_response
Example #19
    def __init__(self,
                 timeout=False,
                 base_uri=BASE_URI,
                 name='NeutronRestProxy'):
        LOG.debug("ServerPool: initializing")
        # 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
        # till next failure). Use 'server_auth' to encode api-key
        servers = cfg.CONF.RESTPROXY.servers
        self.auth = cfg.CONF.RESTPROXY.server_auth
        self.ssl = cfg.CONF.RESTPROXY.server_ssl
        self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
        self.user_domain_id = KS3_DEFAULT_DOMAIN_ID
        self.project_domain_id = KS3_DEFAULT_DOMAIN_ID
        if 'keystone_authtoken' in cfg.CONF:
            self.auth_url = cfg.CONF.keystone_authtoken.auth_uri
            self.auth_user = cfg.CONF.keystone_authtoken.admin_user
            self.auth_password = cfg.CONF.keystone_authtoken.admin_password
            self.auth_tenant = cfg.CONF.keystone_authtoken.admin_tenant_name
        else:
            self.auth_url = cfg.CONF.RESTPROXY.auth_url
            self.auth_user = cfg.CONF.RESTPROXY.auth_user
            self.auth_password = cfg.CONF.RESTPROXY.auth_password
            self.auth_tenant = cfg.CONF.RESTPROXY.auth_tenant

        # Use Keystonev3 URL for authentication
        if "v2.0" in self.auth_url:
            self.auth_url = self.auth_url.replace("v2.0", "v3")
        elif "v3" not in self.auth_url:
            self.auth_url = "%s/v3" % self.auth_url

        self.base_uri = base_uri
        self.name = name
        self.contexts = {}
        # Cache for Openstack projects
        # The cache is maintained in a separate thread and sync'ed with
        # Keystone periodically.
        self.keystone_tenants = {}
        self._update_tenant_cache(reconcile=False)
        self.timeout = cfg.CONF.RESTPROXY.server_timeout
        self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections
        default_port = 8000
        if timeout is not False:
            self.timeout = timeout
        self._topo_sync_in_progress = False

        # Function to use to retrieve topology for consistency syncs.
        # Needs to be set by module that uses the servermanager.
        self.get_topo_function = None
        self.get_topo_function_args = {}

        if not servers:
            raise cfg.Error(_('Servers not defined. Aborting server manager.'))
        servers = [
            s if len(s.rsplit(':', 1)) == 2 else "%s:%d" % (s, default_port)
            for s in servers
        ]
        if any((len(spl) != 2 or not spl[1].isdigit())
               for spl in [sp.rsplit(':', 1) for sp in servers]):
            raise cfg.Error(
                _('Servers must be defined as <ip>:<port>. '
                  'Configuration was %s') % servers)
        self.servers = []
        for s in servers:
            server, port = s.rsplit(':', 1)
            if server.startswith("[") and server.endswith("]"):
                # strip [] for ipv6 address
                server = server[1:-1]
            self.servers.append(self.server_proxy_for(server, int(port)))
        self.start_background_tasks()
        ServerPool._instance = self
        LOG.debug("ServerPool: initialization done")
Example #20
 def get_projects(self):
     if self.version == 'v2':
         projects = self.keystone_client.tenants.list()
     elif self.version == 'v3':
         raise cfg.Error('keystone api v3 is not supported yet')
     return {p.id: p.name for p in projects}