Example #1
    def __init__(self, timeout=10, base_uri=BASE_URI, name='NeutronRestProxy'):
        LOG.debug(_("ServerPool: initializing"))
        # 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
        # till next failure). Use 'server_auth' to encode api-key
        servers = cfg.CONF.RESTPROXY.servers
        self.auth = cfg.CONF.RESTPROXY.server_auth
        self.ssl = cfg.CONF.RESTPROXY.server_ssl
        self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
        self.base_uri = base_uri
        self.name = name
        self.timeout = cfg.CONF.RESTPROXY.server_timeout
        default_port = 8000
        if timeout is not None:
            self.timeout = timeout

        if not servers:
            raise cfg.Error(_('Servers not defined. Aborting server manager.'))
        servers = [
            s if len(s.rsplit(':', 1)) == 2 else "%s:%d" % (s, default_port)
            for s in servers
        ]
        if any(
            (len(spl) != 2) for spl in [sp.rsplit(':', 1) for sp in servers]):
            raise cfg.Error(
                _('Servers must be defined as <ip>:<port>. '
                  'Configuration was %s') % servers)
        self.servers = [
            self.server_proxy_for(server, int(port))
            for server, port in (s.rsplit(':', 1) for s in servers)
        ]
        LOG.debug(_("ServerPool: initialization done"))
Example #2
    def _get_combined_cert_for_server(self, server, port):
        # The ssl library requires a combined file with all trusted certs
        # so we make one containing the trusted CAs and the corresponding
        # host cert for this server
        combined_cert = None
        if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation:
            base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory
            host_dir = os.path.join(base_ssl, 'host_certs')
            ca_dir = os.path.join(base_ssl, 'ca_certs')
            combined_dir = os.path.join(base_ssl, 'combined')
            combined_cert = os.path.join(combined_dir, '%s.pem' % server)
            if not os.path.exists(base_ssl):
                raise cfg.Error(
                    _('ssl_cert_directory [%s] does not exist. '
                      'Create it or disable ssl.') % base_ssl)
            for automake in [combined_dir, ca_dir, host_dir]:
                if not os.path.exists(automake):
                    os.makedirs(automake)

            # get all CA certs
            certs = self._get_ca_cert_paths(ca_dir)

            # check for a host specific cert
            hcert, exists = self._get_host_cert_path(host_dir, server)
            if exists:
                certs.append(hcert)
            elif cfg.CONF.RESTPROXY.ssl_sticky:
                self._fetch_and_store_cert(server, port, hcert)
                certs.append(hcert)
            if not certs:
                raise cfg.Error(
                    _('No certificates were found to verify '
                      'controller %s') % (server))
            self._combine_certs_to_file(certs, combined_cert)
        return combined_cert
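
The _combine_certs_to_file helper is not shown in this example; a minimal sketch of what such a helper might look like, assuming each entry in certs is the path to a PEM file on disk:

    def _combine_certs_to_file(self, cert_paths, combined_path):
        # Sketch: concatenate every trusted PEM file into one bundle so the
        # ssl library can be pointed at a single CA file for this server.
        with open(combined_path, 'w') as combined:
            for path in cert_paths:
                with open(path, 'r') as cert_file:
                    combined.write(cert_file.read())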
Example #3
    def __init__(self,
                 servers,
                 auth,
                 ssl,
                 no_ssl_validation,
                 ssl_sticky,
                 ssl_cert_directory,
                 consistency_interval,
                 timeout=False,
                 cache_connections=False,
                 base_uri=BASE_URI,
                 success_codes=SUCCESS_CODES,
                 failure_codes=FAILURE_CODES,
                 name='NeutronRestProxy'):
        LOG.debug(_("ServerPool: initializing"))
        # 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
        # till next failure). Use 'server_auth' to encode api-key
        self.auth = auth
        self.ssl = ssl
        self.base_uri = base_uri
        self.success_codes = success_codes
        self.failure_codes = failure_codes
        self.name = name
        self.timeout = timeout
        self.always_reconnect = not cache_connections
        self.consistency_interval = consistency_interval
        self.no_ssl_validation = no_ssl_validation
        self.ssl_cert_directory = ssl_cert_directory
        self.ssl_sticky = ssl_sticky

        default_port = 8000
        if timeout is not False:
            self.timeout = timeout

        # Function to use to retrieve topology for consistency syncs.
        # Needs to be set by module that uses the servermanager.
        self.get_topo_function = None
        self.get_topo_function_args = {}

        if not servers:
            raise cfg.Error(_('Servers not defined. Aborting server manager.'))
        servers = [
            s if len(s.rsplit(':', 1)) == 2 else "%s:%d" % (s, default_port)
            for s in servers
        ]
        if any((len(spl) != 2 or not spl[1].isdigit())
               for spl in [sp.rsplit(':', 1) for sp in servers]):
            raise cfg.Error(
                _('Servers must be defined as <ip>:<port>. '
                  'Configuration was %s') % servers)
        self.servers = [
            self.server_proxy_for(server, int(port))
            for server, port in (s.rsplit(':', 1) for s in servers)
        ]
        eventlet.spawn(self._consistency_watchdog, self.consistency_interval)
        LOG.debug("ServerPool: initialization done")
Example #4
    def _fetch_and_store_cert(self, server, port, path):
        '''
        Grabs a certificate from a server and writes it to
        a given path.
        '''
        try:
            cert = ssl.get_server_certificate((server, port),
                                              ssl_version=ssl.PROTOCOL_TLSv1)
        except Exception as e:
            raise cfg.Error(
                _('Could not retrieve initial '
                  'certificate from controller %(server)s. '
                  'Error details: %(error)s') % {
                      'server': server,
                      'error': str(e)
                  })

        LOG.warning(
            _("Storing to certificate for host %(server)s "
              "at %(path)s") % {
                  'server': server,
                  'path': path
              })
        self._file_put_contents(path, cert)

        return cert
Example #5
def _make_opt(name, default):
    """Create an oslo.config option with type deduction

    The type for the option is deduced from the default value given
    for that option. A default value of None is deduced to Opt.

    Note: MultiStrOpt is not supported.

    :param name: the name of the option, as a valid Python identifier
    :param default: the default value of the option, or (default, description)
    :raises: cfg.Error if the type can not be deduced.
    """

    deduction = {
        str: cfg.StrOpt,
        bool: cfg.BoolOpt,
        int: cfg.IntOpt,
        long: cfg.IntOpt,
        float: cfg.FloatOpt,
        list: cfg.ListOpt,
    }

    if type(default) is tuple:
        default, help = default
    else:
        help = None

    if default is None:
        return cfg.Opt(name, help=help)

    try:
        return deduction[type(default)](name, help=help, default=default)
    except KeyError:
        raise cfg.Error(u'unrecognized option type')
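
A brief usage sketch of the deduction above; the option names are hypothetical and only illustrate which oslo.config type each default maps to:

opts = [
    _make_opt('controller_address', ('192.0.2.1', 'Controller address')),  # StrOpt
    _make_opt('poll_interval', 5),        # IntOpt
    _make_opt('enable_sync', False),      # BoolOpt
    _make_opt('extra_headers', None),     # plain cfg.Opt, no deduced type
]
# An unsupported default type (e.g. a dict) raises cfg.Error.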
Example #6
    def __init__(self,
                 timeout=False,
                 base_uri=BASE_URI,
                 name='NeutronRestProxy'):
        LOG.debug(_("ServerPool: initializing"))
        # 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
        # till next failure). Use 'server_auth' to encode api-key
        servers = cfg.CONF.RESTPROXY.servers
        self.auth = cfg.CONF.RESTPROXY.server_auth
        self.ssl = cfg.CONF.RESTPROXY.server_ssl
        self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
        self.base_uri = base_uri
        self.name = name
        self.timeout = cfg.CONF.RESTPROXY.server_timeout
        self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections
        default_port = 8000
        if timeout is not False:
            self.timeout = timeout

        # Function to use to retrieve topology for consistency syncs.
        # Needs to be set by module that uses the servermanager.
        self.get_topo_function = None
        self.get_topo_function_args = {}

        # Hash to send to backend with request as expected previous
        # state to verify consistency.
        self.consistency_hash = cdb.get_consistency_hash()
        eventlet.spawn(self._consistency_watchdog,
                       cfg.CONF.RESTPROXY.consistency_interval)

        if not servers:
            raise cfg.Error(_('Servers not defined. Aborting server manager.'))
        servers = [
            s if len(s.rsplit(':', 1)) == 2 else "%s:%d" % (s, default_port)
            for s in servers
        ]
        if any(
            (len(spl) != 2) for spl in [sp.rsplit(':', 1) for sp in servers]):
            raise cfg.Error(
                _('Servers must be defined as <ip>:<port>. '
                  'Configuration was %s') % servers)
        self.servers = [
            self.server_proxy_for(server, int(port))
            for server, port in (s.rsplit(':', 1) for s in servers)
        ]
        LOG.debug(_("ServerPool: initialization done"))
Example #7
    def initialize(self):
        try:
            self.pci_vendor_info = self._parse_pci_vendor_config(
                cfg.CONF.ml2_sriov.supported_pci_vendor_devs)
            self.agent_required = cfg.CONF.ml2_sriov.agent_required
        except ValueError:
            LOG.exception(_("Failed to parse supported PCI vendor devices"))
            raise cfg.Error(_("Parsing supported pci_vendor_devs failed"))
Example #8
def register_deprecated(conf):
    conf.register_opts([host_route_depr])
    multi_parser = cfg.MultiConfigParser()
    read_ok = multi_parser.read(conf.config_file)
    if len(read_ok) != len(conf.config_file):
        raise cfg.Error("Some config files were not parsed properly")

    for parsed_file in multi_parser.parsed:
        for section in parsed_file.keys():
            if section not in conf and section.startswith("CLUSTER:"):
                conf.register_opts(cluster_opts + [controller_depr], section)
Example #9
def parse_pci_vendor_config():
    vendor_list = []
    vendor_config_list = cfg.CONF.ml2_cisco_ucsm.supported_pci_devs
    for vendor in vendor_config_list:
        vendor_product = vendor.split(':')
        if len(vendor_product) != 2:
            raise cfg.Error(
                _("UCS Mech Driver: Invalid PCI device "
                  "config: %s") % vendor)
        vendor_list.append(vendor)
    return vendor_list
Example #10
def parse_ucsm_host_config():
    host_dict = {}
    host_config_list = cfg.CONF.ml2_cisco_ucsm.ucsm_host_list
    for host in host_config_list:
        host_sp = host.split(':')
        if len(host_sp) != 2:
            raise cfg.Error(
                _("UCS Mech Driver: Invalid Host Service "
                  "Profile config: %s") % host)
        key = host_sp[0]
        host_dict[key] = host_sp[1]
    return host_dict
Example #11
    def __init__(self):
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)

        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(
                _("Failed to read config files %(file)s") %
                {'file': cfg.CONF.config_file})

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                for key, value in parsed_file[parsed_item].items():
                    if parsed_item == 'mech_driver_agent':
                        self.dfa_cfg[key] = value
Example #12
    def rest_call(self, action, resource, data, headers, ignore_codes,
                  timeout=False):
        hash_handler = cdb.HashHandler(context=self.get_context_ref())
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            ret = active_server.rest_call(action, resource, data, headers,
                                          timeout,
                                          reconnect=self.always_reconnect,
                                          hash_handler=hash_handler)
            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT:
                if not self.get_topo_function:
                    raise cfg.Error(_('Server requires synchronization, '
                                      'but no topology function was defined.'))
                # The hash was incorrect so it needs to be removed
                hash_handler.put_hash('')
                data = self.get_topo_function(**self.get_topo_function_args)
                active_server.rest_call('PUT', TOPOLOGY_PATH, data,
                                        timeout=None)
            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                return ret
            else:
                LOG.error(_('ServerProxy: %(action)s failure for servers: '
                            '%(server)r Response: %(response)s'),
                          {'action': action,
                           'server': (active_server.server,
                                      active_server.port),
                           'response': ret[3]})
                LOG.error(_("ServerProxy: Error details: status=%(status)d, "
                            "reason=%(reason)r, ret=%(ret)s, data=%(data)r"),
                          {'status': ret[0], 'reason': ret[1], 'ret': ret[2],
                           'data': ret[3]})
                active_server.failed = True

        # All servers failed, reset server list and try again next time
        LOG.error(_('ServerProxy: %(action)s failure for all servers: '
                    '%(server)r'),
                  {'action': action,
                   'server': tuple((s.server,
                                    s.port) for s in self.servers)})
        return first_response
Example #13
    def _create_hp_config(self):
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)

        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(_("Some config files were not parsed properly."))

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                config_id, sep, ip = parsed_item.partition(':')
                config_key = config_id.lower()
                key_items = parsed_file[parsed_item].items()
                if config_key == 'ml2_hp_leaf':
                    self._create_leaf_config(ip, key_items)
                elif config_key == 'ml2_hp_spine':
                    self._create_spine_config(ip, key_items)
Example #14
    def _create_ml2_mech_device_cisco_dictionary(self):
        """Create the ML2 device cisco dictionary.

        Read data from the ml2_conf_cisco.ini device supported sections.
        """
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)

        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(_("Some config files were not parsed properly"))

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                dev_id, sep, dev_ip = parsed_item.partition(':')
                if dev_id.lower() == 'ml2_mech_cisco_nexus':
                    for dev_key, value in parsed_file[parsed_item].items():
                        self.nexus_dict[dev_ip, dev_key] = value[0]
Example #15
    def _create_device_dictionary(self):
        """
        Create the device dictionary from the cisco_plugins.ini
        device supported sections. Ex. NEXUS_SWITCH, N1KV.
        """

        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(CONF.config_file)

        if len(read_ok) != len(CONF.config_file):
            raise cfg.Error(_("Some config files were not parsed properly"))

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                dev_id, sep, dev_ip = parsed_item.partition(':')
                if dev_id.lower() in ['nexus_switch', 'n1kv']:
                    for dev_key, value in parsed_file[parsed_item].items():
                        device_dictionary[dev_id, dev_ip, dev_key] = value[0]
Example #16
    def _create_nexus_dictionary(self):
        """Create the Nexus dictionary.

        Reads data from cisco_plugins.ini NEXUS_SWITCH section(s).
        """

        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(CONF.config_file)

        if len(read_ok) != len(CONF.config_file):
            raise cfg.Error("Some config files were not parsed properly")

        for parsed_file in multi_parser.parsed:
            for parsed_item in parsed_file.keys():
                nexus_name, sep, nexus_ip = parsed_item.partition(':')
                if nexus_name.lower() == "nexus_switch":
                    for nexus_key, value in parsed_file[parsed_item].items():
                        nexus_dictionary[nexus_ip, nexus_key] = value[0]
Example #17
def create_switch_dictionary():
    multi_parser = cfg.MultiConfigParser()
    read_ok = multi_parser.read(cfg.CONF.config_file)

    if len(read_ok) != len(cfg.CONF.config_file):
        raise cfg.Error(_("Some config files were not parsed properly"))

    for parsed_file in multi_parser.parsed:
        for parsed_item in parsed_file.keys():
            if parsed_item.startswith('apic_switch'):
                switch, switch_id = parsed_item.split(':')
                if switch.lower() == 'apic_switch':
                    _switch_dict[switch_id] = {}
                    port_cfg = parsed_file[parsed_item].items()
                    for host_list, port in port_cfg:
                        hosts = host_list.split(',')
                        port = port[0]
                        _switch_dict[switch_id][port] = hosts

    return _switch_dict
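
For illustration only, a hypothetical config section that this parser would pick up, mapping comma-separated host lists to switch ports (the switch id, hosts and ports are made up):

[apic_switch:203]
compute11,compute12 = 1/43
compute21 = 1/44

With MultiConfigParser the parsed values arrive as lists, so port[0] recovers the string; the fragment above would produce _switch_dict['203'] == {'1/43': ['compute11', 'compute12'], '1/44': ['compute21']}.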
Example #18
def find_available_csrs_from_config(config_files):
    """Read INI for available Cisco CSRs that driver can use.

    Loads management port, tunnel IP, user, and password information for
    available CSRs from configuration file. Driver will use this info to
    configure VPN connections. The CSR is associated 1:1 with a Neutron
    router. To identify which CSR to use for a VPN service, the public
    (GW) IP of the Neutron router will be used as an index into the CSR
    config info.
    """
    multi_parser = cfg.MultiConfigParser()
    LOG.info(_("Scanning config files %s for Cisco CSR configurations"),
             config_files)
    try:
        read_ok = multi_parser.read(config_files)
    except cfg.ParseError as pe:
        LOG.error(_("Config file parse error: %s"), pe)
        return {}

    if len(read_ok) != len(config_files):
        raise cfg.Error(
            _("Unable to parse config files %s for Cisco CSR "
              "info") % config_files)
    csrs_found = {}
    for parsed_file in multi_parser.parsed:
        for parsed_item in parsed_file.keys():
            device_type, sep, for_router = parsed_item.partition(':')
            if device_type.lower() == 'cisco_csr_rest':
                try:
                    netaddr.IPNetwork(for_router)
                except netaddr.core.AddrFormatError:
                    LOG.error(
                        _("Ignoring Cisco CSR configuration entry - "
                          "router IP %s is not valid"), for_router)
                    continue
                entry = parsed_file[parsed_item]
                # Check for missing fields
                try:
                    rest_mgmt_ip = entry['rest_mgmt'][0]
                    tunnel_ip = entry['tunnel_ip'][0]
                    username = entry['username'][0]
                    password = entry['password'][0]
                except KeyError as ke:
                    LOG.error(
                        _("Ignoring Cisco CSR for router %(router)s "
                          "- missing %(field)s setting"), {
                              'router': for_router,
                              'field': str(ke)
                          })
                    continue
                # Validate fields
                try:
                    timeout = float(entry['timeout'][0])
                except ValueError:
                    LOG.error(
                        _("Ignoring Cisco CSR for router %s - "
                          "timeout is not a floating point number"),
                        for_router)
                    continue
                except KeyError:
                    timeout = csr_client.TIMEOUT
                try:
                    netaddr.IPAddress(rest_mgmt_ip)
                except netaddr.core.AddrFormatError:
                    LOG.error(
                        _("Ignoring Cisco CSR for subnet %s - "
                          "REST management is not an IP address"), for_router)
                    continue
                try:
                    netaddr.IPAddress(tunnel_ip)
                except netaddr.core.AddrFormatError:
                    LOG.error(
                        _("Ignoring Cisco CSR for router %s - "
                          "local tunnel is not an IP address"), for_router)
                    continue
                csrs_found[for_router] = {
                    'rest_mgmt': rest_mgmt_ip,
                    'tunnel_ip': tunnel_ip,
                    'username': username,
                    'password': password,
                    'timeout': timeout
                }

                LOG.debug(_("Found CSR for router %(router)s: %(info)s"), {
                    'router': for_router,
                    'info': csrs_found[for_router]
                })
    return csrs_found
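
The sections this parser looks for are named cisco_csr_rest:<router GW IP>. A hypothetical fragment, for illustration only (addresses and credentials are made up; the keys match those read above):

[cisco_csr_rest:192.0.2.1]
rest_mgmt = 10.20.30.1
tunnel_ip = 10.20.30.2
username = admin
password = secret
timeout = 5.0

An entry like this ends up in csrs_found keyed by the router's GW IP, with timeout parsed as a float; if timeout is omitted the driver falls back to csr_client.TIMEOUT.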
Example #19
    def rest_call(self,
                  action,
                  resource,
                  data,
                  headers,
                  ignore_codes,
                  timeout=False):
        context = self.get_context_ref()
        if context:
            # include the requesting context information if available
            cdict = context.to_dict()
            headers[REQ_CONTEXT_HEADER] = json.dumps(cdict)
        hash_handler = cdb.HashHandler(context=context)
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
                ret = active_server.rest_call(action,
                                              resource,
                                              data,
                                              headers,
                                              timeout,
                                              reconnect=self.always_reconnect,
                                              hash_handler=hash_handler)
                if ret[0] != httplib.SERVICE_UNAVAILABLE:
                    break
                time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)

            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT:
                if not self.get_topo_function:
                    raise cfg.Error(
                        _('Server requires synchronization, '
                          'but no topology function was defined.'))
                data = self.get_topo_function(**self.get_topo_function_args)
                active_server.rest_call('PUT',
                                        TOPOLOGY_PATH,
                                        data,
                                        timeout=None)
            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                return ret
            else:
                LOG.error(
                    _('ServerProxy: %(action)s failure for servers: '
                      '%(server)r Response: %(response)s'), {
                          'action': action,
                          'server': (active_server.server, active_server.port),
                          'response': ret[3]
                      })
                LOG.error(
                    _("ServerProxy: Error details: status=%(status)d, "
                      "reason=%(reason)r, ret=%(ret)s, data=%(data)r"), {
                          'status': ret[0],
                          'reason': ret[1],
                          'ret': ret[2],
                          'data': ret[3]
                      })
                active_server.failed = True

        # All servers failed, reset server list and try again next time
        LOG.error(
            _('ServerProxy: %(action)s failure for all servers: '
              '%(server)r'), {
                  'action': action,
                  'server': tuple((s.server, s.port) for s in self.servers)
              })
        return first_response
Example #20
    def rest_call(self,
                  action,
                  resource,
                  data,
                  headers,
                  ignore_codes,
                  timeout=False):
        context = self.get_context_ref()
        if context:
            # include the requesting context information if available
            cdict = context.to_dict()
            # remove the auth token so it's not present in debug logs on the
            # backend controller
            cdict.pop('auth_token', None)
            headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
        hash_handler = cdb.HashHandler()
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
                ret = active_server.rest_call(action,
                                              resource,
                                              data,
                                              headers,
                                              timeout,
                                              reconnect=self.always_reconnect,
                                              hash_handler=hash_handler)
                if ret[0] != httplib.SERVICE_UNAVAILABLE:
                    break
                time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)

            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT:
                if not self.get_topo_function:
                    raise cfg.Error(
                        _('Server requires synchronization, '
                          'but no topology function was defined.'))
                data = self.get_topo_function(**self.get_topo_function_args)
                active_server.rest_call('PUT',
                                        TOPOLOGY_PATH,
                                        data,
                                        timeout=None)
            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                return ret
            else:
                LOG.error(
                    _('ServerProxy: %(action)s failure for servers: '
                      '%(server)r Response: %(response)s'), {
                          'action': action,
                          'server': (active_server.server, active_server.port),
                          'response': ret[3]
                      })
                LOG.error(
                    _("ServerProxy: Error details: status=%(status)d, "
                      "reason=%(reason)r, ret=%(ret)s, data=%(data)r"), {
                          'status': ret[0],
                          'reason': ret[1],
                          'ret': ret[2],
                          'data': ret[3]
                      })
                active_server.failed = True

        # A failure on a delete means the object is gone from Neutron but not
        # from the controller. Set the consistency hash to a bad value to
        # trigger a sync on the next check.
        # NOTE: The hash must have a comma in it otherwise it will be ignored
        # by the backend.
        if action == 'DELETE':
            hash_handler.put_hash('INCONSISTENT,INCONSISTENT')
        # All servers failed, reset server list and try again next time
        LOG.error(
            _('ServerProxy: %(action)s failure for all servers: '
              '%(server)r'), {
                  'action': action,
                  'server': tuple((s.server, s.port) for s in self.servers)
              })
        return first_response