def remove_router_interface(self, context, router_id, interface_info):
    """Remove a router interface from the Neutron DB and the backend.

    :param context: neutron request context
    :param router_id: UUID of the router losing the interface
    :param interface_info: dict carrying either 'port_id' or 'subnet_id'
    :returns: result of the parent class' remove_router_interface
    :raises exceptions.BadRequest: when neither subnet_id nor port_id
        is supplied
    """
    # Validate args
    router = self._get_router(context, router_id)
    tenant_id = router['tenant_id']
    # we will first get the interface identifier before deleting in the DB
    if not interface_info:
        msg = _("Either subnet_id or port_id must be specified")
        raise exceptions.BadRequest(resource='router', msg=msg)
    if 'port_id' in interface_info:
        # the interface is identified by the subnet of the port's first
        # fixed IP — assumes the port has at least one fixed IP (TODO confirm)
        port = self._get_port(context, interface_info['port_id'])
        interface_id = port['fixed_ips'][0]['subnet_id']
    elif 'subnet_id' in interface_info:
        subnet = self._get_subnet(context, interface_info['subnet_id'])
        interface_id = subnet['id']
    else:
        msg = _("Either subnet_id or port_id must be specified")
        raise exceptions.BadRequest(resource='router', msg=msg)
    with db.context_manager.writer.using(context):
        # remove router in DB
        # TODO(wolverineav): hack until fixed at right place
        setattr(context, 'GUARD_TRANSACTION', False)
        del_ret = super(L3RestProxy, self).remove_router_interface(
            context, router_id, interface_info)
        # create router on the network controller
        self.servers.rest_remove_router_interface(tenant_id, router_id,
                                                  interface_id)
        return del_ret
def _get_combined_cert_for_server(self, server, port):
    """Build (and cache on disk) a combined PEM file for *server*.

    The ssl library requires a single file holding every trusted cert,
    so this concatenates all CA certs plus the host cert for this
    server into <ssl_cert_directory>/combined/<server>.pem.

    :returns: path of the combined cert file, or None when ssl is
        disabled or validation is turned off
    :raises cfg.Error: when the cert directory is missing or no certs
        can be found for the server
    """
    combined_cert = None
    if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation:
        base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory
        host_dir = os.path.join(base_ssl, 'host_certs')
        ca_dir = os.path.join(base_ssl, 'ca_certs')
        combined_dir = os.path.join(base_ssl, 'combined')
        combined_cert = os.path.join(combined_dir, '%s.pem' % server)
        if not os.path.exists(base_ssl):
            raise cfg.Error(
                _('ssl_cert_directory [%s] does not exist. '
                  'Create it or disable ssl.') % base_ssl)
        # create any missing sub-directories on first use
        for automake in [combined_dir, ca_dir, host_dir]:
            if not os.path.exists(automake):
                os.makedirs(automake)
        # get all CA certs
        certs = self._get_ca_cert_paths(ca_dir)
        # check for a host specific cert
        hcert, exists = self._get_host_cert_path(host_dir, server)
        if exists:
            certs.append(hcert)
        elif cfg.CONF.RESTPROXY.ssl_sticky:
            # "sticky" mode: trust-on-first-use — fetch and store the
            # server's cert when we have not seen one yet
            self._fetch_and_store_cert(server, port, hcert)
            certs.append(hcert)
        if not certs:
            raise cfg.Error(
                _('No certificates were found to verify '
                  'controller %s') % (server))
        self._combine_certs_to_file(certs, combined_cert)
    return combined_cert
def remove_router_interface(self, context, router_id, interface_info):
    """Remove a router interface from the Neutron DB and the backend.

    :param context: neutron request context
    :param router_id: UUID of the router losing the interface
    :param interface_info: dict carrying either 'port_id' or 'subnet_id'
    :returns: result of the parent class' remove_router_interface
    :raises exceptions.BadRequest: when neither subnet_id nor port_id
        is supplied
    """
    # Validate args
    router = self._get_router(context, router_id)
    tenant_id = router['tenant_id']
    # we will first get the interface identifier before deleting in the DB
    if not interface_info:
        msg = _("Either subnet_id or port_id must be specified")
        raise exceptions.BadRequest(resource='router', msg=msg)
    if 'port_id' in interface_info:
        # interface is identified by the subnet of the port's first
        # fixed IP — assumes the port has at least one fixed IP (TODO confirm)
        port = self._get_port(context, interface_info['port_id'])
        interface_id = port['fixed_ips'][0]['subnet_id']
    elif 'subnet_id' in interface_info:
        subnet = self._get_subnet(context, interface_info['subnet_id'])
        interface_id = subnet['id']
    else:
        msg = _("Either subnet_id or port_id must be specified")
        raise exceptions.BadRequest(resource='router', msg=msg)
    with db.context_manager.writer.using(context):
        # remove router in DB
        # TODO(wolverineav): hack until fixed at right place
        setattr(context, 'GUARD_TRANSACTION', False)
        del_ret = super(L3RestProxy, self).remove_router_interface(context,
                                                                   router_id,
                                                                   interface_info)
        # create router on the network controller
        self.servers.rest_remove_router_interface(tenant_id, router_id,
                                                  interface_id)
        return del_ret
def _get_combined_cert_for_server(self, server, port):
    """Assemble the combined trusted-cert PEM file for *server*.

    Concatenates all CA certs and the server's host cert into
    <ssl_cert_directory>/combined/<server>.pem, since the ssl library
    needs a single file with every trusted cert.

    :returns: path of the combined cert file, or None when ssl is
        disabled or validation is turned off
    :raises cfg.Error: when the cert directory is missing or no certs
        can be found for the server
    """
    combined_cert = None
    if self.ssl and not cfg.CONF.RESTPROXY.no_ssl_validation:
        base_ssl = cfg.CONF.RESTPROXY.ssl_cert_directory
        host_dir = os.path.join(base_ssl, 'host_certs')
        ca_dir = os.path.join(base_ssl, 'ca_certs')
        combined_dir = os.path.join(base_ssl, 'combined')
        combined_cert = os.path.join(combined_dir, '%s.pem' % server)
        if not os.path.exists(base_ssl):
            raise cfg.Error(_('ssl_cert_directory [%s] does not exist. '
                              'Create it or disable ssl.') % base_ssl)
        # create any missing sub-directories on first use
        for automake in [combined_dir, ca_dir, host_dir]:
            if not os.path.exists(automake):
                os.makedirs(automake)
        # get all CA certs
        certs = self._get_ca_cert_paths(ca_dir)
        # check for a host specific cert
        hcert, exists = self._get_host_cert_path(host_dir, server)
        if exists:
            certs.append(hcert)
        elif cfg.CONF.RESTPROXY.ssl_sticky:
            # "sticky" mode: trust-on-first-use — fetch and store the
            # server's cert when we have not seen one yet
            self._fetch_and_store_cert(server, port, hcert)
            certs.append(hcert)
        if not certs:
            raise cfg.Error(_('No certificates were found to verify '
                              'controller %s') % (server))
        self._combine_certs_to_file(certs, combined_cert)
    return combined_cert
def _networktemplateassignment_updatable_args(parser): parser.add_argument( 'template_id', metavar='template-id', help=_('ID of the network template associated with this tenant.')) parser.add_argument( 'stack_id', metavar='stack-id', help=_('ID of the heat template associated with this tenant.'))
def _get_cached_vswitch_existence(self, host): """Returns cached existence. Expired and non-cached raise ValueError. """ entry = self.ivs_host_cache.get(host) if not entry: raise ValueError(_('No cache entry for host %s') % host) diff = timeutils.delta_seconds(entry['timestamp'], datetime.datetime.now()) if diff > CACHE_VSWITCH_TIME: self.ivs_host_cache.pop(host) raise ValueError(_('Expired cache entry for host %s') % host) return entry['exists']
def rest_update_router(self, tenant_id, router, router_id):
    """Push an updated router definition to the backend controller."""
    self._check_and_raise_exception_unsupported_name(
        ObjTypeEnum.router, router)
    self.rest_action('PUT', ROUTERS_PATH % (tenant_id, router_id),
                     {"router": router},
                     _("Unable to update remote router: %s"))
class TenantIDNotFound(exceptions.NeutronException):
    """Raised when keystone has no record of the given tenant id."""
    message = _("Tenant: %(tenant)s is not known by keystone.")
    # status left unset; may be filled in by callers
    status = None

    def __init__(self, **kwargs):
        # keep the offending tenant id around for callers that inspect it
        self.tenant = kwargs.get('tenant')
        super(TenantIDNotFound, self).__init__(**kwargs)
def rest_get_switch(self, switch_id):
    """Fetch switch info from the controller; None when it is absent."""
    reply = self.rest_action('GET', SWITCHES_PATH % switch_id,
                             errstr=_("Unable to retrieve switch: %s"),
                             ignore_codes=[404])
    # a 404 means the switch does not exist on the controller
    if reply[0] == 404:
        return None
    return reply[3]
def _validate_port_number(self, port_num): msg = None int_port_num = int(port_num) if int_port_num not in range(0, 65536): msg = (_("Port number specified %s in policy not in valid " "range of 1 to 65535.") % port_num) return msg
def _fetch_and_store_cert(self, server, port, path):
    """_fetch_and_store_cert
    Grabs a certificate from a server and writes it to a given path.

    :returns: the PEM-encoded certificate string
    :raises cfg.Error: when the certificate cannot be retrieved
    """
    try:
        # NOTE(review): PROTOCOL_SSLv23 is deprecated in modern ssl
        # modules (it negotiates the highest mutually-supported
        # protocol) — confirm controller compatibility before changing
        cert = ssl.get_server_certificate((server, port),
                                          ssl_version=ssl.PROTOCOL_SSLv23)
    except Exception as e:
        raise cfg.Error(
            _('Could not retrieve initial '
              'certificate from controller %(server)s. '
              'Error details: %(error)s') % {
                  'server': server,
                  'error': e
              })
    # NOTE(review): message reads "Storing to certificate" — likely
    # meant "Storing certificate"; left unchanged here
    LOG.warning(
        "Storing to certificate for host %(server)s "
        "at %(path)s", {
            'server': server,
            'path': path
        })
    self._file_put_contents(path, cert)
    return cert
def rest_update_router(self, tenant_id, router, router_id):
    """Send the updated router for *tenant_id* to the controller."""
    self._check_and_raise_exception_unsupported_name(ObjTypeEnum.router,
                                                     router)
    target = ROUTERS_PATH % (tenant_id, router_id)
    self.rest_action('PUT', target, {"router": router},
                     _("Unable to update remote router: %s"))
def rest_get_switch(self, switch_id):
    """Look up a switch on the controller, returning None on 404."""
    reply = self.rest_action('GET', SWITCHES_PATH % switch_id,
                             errstr=_("Unable to retrieve switch: %s"),
                             ignore_codes=[404])
    # reply[0] is the HTTP status, reply[3] the parsed body
    return reply[3] if reply[0] != 404 else None
def rest_create_securitygroup(self, sg):
    """Create a security group on the backend controller."""
    self._check_and_raise_exception_unsupported_name(
        ObjTypeEnum.security_group, sg)
    self.rest_action('POST', SECURITY_GROUP_RESOURCE_PATH,
                     {"security-group": sg},
                     _("Unable to create security group: %s"))
class TenantPolicyException(n_exc.NeutronException):
    """Raised when a tenant policy operation fails."""
    message = _("Error in tenant policy operation: %(error_msg)s")
    status = None

    def __init__(self, **kwargs):
        # bug fix: the error text used to be stored only under the
        # misleading attribute name ``tenant`` (copy/paste from a
        # tenant exception); expose it as ``error_msg`` while keeping
        # ``tenant`` for backward compatibility
        self.error_msg = kwargs.get('error_msg')
        self.tenant = self.error_msg
        super(TenantPolicyException, self).__init__(**kwargs)
def rest_create_securitygroup(self, sg):
    """Push a new security group to the backend controller."""
    self._check_and_raise_exception_unsupported_name(
        ObjTypeEnum.security_group, sg)
    payload = {"security-group": sg}
    self.rest_action('POST', SECURITY_GROUP_RESOURCE_PATH, payload,
                     _("Unable to create security group: %s"))
def router_interface_before_create_callback(self, resource, event, trigger,
                                            **kwargs):
    """Pre-commit hook: mirror a new router interface to the backend.

    Resolves the subnet backing the interface, records it in the
    transaction cache (so a rollback can undo the backend change),
    then creates the interface on the network controller.

    :raises exceptions.BadRequest: when interface_info carries neither
        a port_id nor a subnet_id
    """
    context = kwargs.get('context')
    router = kwargs.get('router_db')
    port = kwargs.get('port')
    interface_info = kwargs.get('interface_info')
    router_id = kwargs.get('router_id')
    if 'port_id' in interface_info:
        # assumes the port carries at least one fixed IP — TODO confirm
        subnet_id = port['fixed_ips'][0]['subnet_id']
    elif 'subnet_id' in interface_info:
        subnet_id = interface_info['subnet_id']
    else:
        msg = _("Either subnet_id or port_id must be specified")
        raise exceptions.BadRequest(resource='router', msg=msg)
    # bookmark for delete in case of transaction rollback
    self.txn_cache.add_transaction(interface_info[BSN_TRANSACTION_ID],
                                   subnet_id)
    with db.context_manager.reader.using(context):
        # we will use the port's subnet id as interface's id
        intf_details = self._get_router_intf_details(
            context, port, subnet_id)
        # create interface on the network controller
        self.servers.rest_add_router_interface(router['tenant_id'],
                                               router_id, intf_details)
def rest_create_router(self, tenant_id, router):
    """Create a router for *tenant_id* on the backend controller."""
    self._check_and_raise_exception_unsupported_name(
        ObjTypeEnum.router, router)
    self.rest_action('POST', ROUTER_RESOURCE_PATH % tenant_id,
                     {"router": router},
                     _("Unable to create remote router: %s"))
def rest_get_port(self, tenant_id, net_id, port_id):
    """Fetch a port attachment; returns None when the port is absent."""
    reply = self.rest_action('GET',
                             ATTACHMENT_PATH % (tenant_id, net_id, port_id),
                             errstr=_("Unable to retrieve port: %s"),
                             ignore_codes=[404])
    if reply[0] == 404:
        return None
    return reply[3]
def rest_create_router(self, tenant_id, router):
    """Push a new router for *tenant_id* to the controller."""
    self._check_and_raise_exception_unsupported_name(ObjTypeEnum.router,
                                                     router)
    target = ROUTER_RESOURCE_PATH % tenant_id
    self.rest_action('POST', target, {"router": router},
                     _("Unable to create remote router: %s"))
def _validate_priority(self, priority): msg = None int_priority = int(priority) if (int_priority not in range(1, 3001) and int_priority != DEFAULT_POLICY_PRIORITY): msg = _("User must provide valid priority between 1 and 3000. " "%s was provided.") % priority return msg
def _validate_port_protocol(self, policy_data): msg = None if ((int(policy_data['source_port']) > 0 or int(policy_data['destination_port']) > 0) and policy_data['protocol'] not in ['tcp', 'udp']): msg = _("Protocol must be select if either source or destination " "port is specified.") return msg
def _validate_nexthops(nexthops): seen = [] for ip in nexthops: msg = validators.validate_ip_address(ip) if ip in seen: msg = _("Duplicate nexthop in rule '%s'") % ip seen.append(ip) if msg: return msg
def _validate_nexthops(nexthops): seen = [] for ip in nexthops: msg = validators.validate_ip_address(ip) if ip in seen: msg = _("Duplicate nexthop in rule '%s'") % ip seen.append(ip) if msg: return msg
class RemoteRestError(exceptions.NeutronException):
    """Raised when a REST call to the backend network controller fails."""
    message = _("Error in REST call to remote network "
                "controller: %(reason)s")
    # HTTP status of the failed call, when known
    status = None

    def __init__(self, **kwargs):
        # pop() so the base class does not receive an unexpected kwarg
        self.status = kwargs.pop('status', None)
        self.reason = kwargs.get('reason')
        super(RemoteRestError, self).__init__(**kwargs)
def _get_cached_vswitch_existence(self, host): """Returns cached existence. Expired and non-cached raise ValueError. """ entry = self.vswitch_host_cache.get(host) if not entry: raise ValueError(_('No cache entry for host %s') % host) diff = timeutils.delta_seconds(entry['timestamp'], datetime.datetime.now()) if diff > CACHE_VSWITCH_TIME: self.vswitch_host_cache.pop(host) raise ValueError(_('Expired cache entry for host %s') % host) if entry['exists']: return entry['type'] return None
def connect(self):
    """Verify the combined cert file contains this server's host cert.

    :raises Exception: when the expected host certificate is missing
        from the combined certificate file
    """
    contents = get_cert_contents(self.combined_cert)
    expected = self.expected_cert % self.host
    if expected not in contents:
        # bug fix: the format dict was passed as a second positional
        # argument to Exception, so the message was never interpolated;
        # apply the % formatting explicitly
        raise Exception(_('No host cert for %(server)s in cert %(cert)s')
                        % {'server': self.host, 'cert': contents})
def rest_get_testpath(self, src, dst):
    """Fetch testpath results; None unless the reply is a 2xx success."""
    resource = TESTPATH_PATH % {'src-tenant': src['tenant'],
                                'src-segment': src['segment'],
                                'src-ip': src['ip'],
                                'dst-ip': dst['ip']}
    reply = self.rest_action(
        'GET', resource,
        errstr=_("Unable to retrieve results for testpath ID: %s"),
        ignore_codes=[404])
    # only a 2xx status carries a usable result body
    if reply[0] in range(200, 300):
        return reply[3]
    return None
class LockRetryCountExceededException(exceptions.NeutronException):
    """Raised when acquiring the topology-sync lock exhausts its retries."""
    message = _("TOPO_SYNC: LockTS %(lock_ts)s, datetime %(lock_dt_string)s "
                "exceeded MAX_LOCK_RETRY_COUNT %(max_lock)s.")
    status = None

    def __init__(self, **kwargs):
        # retain the lock details for callers that inspect the failure
        self.lock_ts = kwargs.get('lock_ts')
        self.lock_dt_string = kwargs.get('lock_dt_string')
        self.max_lock = kwargs.get('max_lock')
        super(LockRetryCountExceededException, self).__init__(**kwargs)
def get_tenant_id_for_create(context, resource):
    """Pick the tenant id to own a resource being created.

    Admins may create on behalf of any tenant named in *resource*;
    everyone else is restricted to their own tenant.

    :raises n_exc.AdminRequired: when a non-admin names another tenant
    """
    if context.is_admin and 'tenant_id' in resource:
        return resource['tenant_id']
    if ('tenant_id' in resource and
            resource['tenant_id'] != context.tenant_id):
        reason = _('Cannot create resource for another tenant')
        raise n_exc.AdminRequired(reason=reason)
    return context.tenant_id
def _get_tenant_id_for_create(self, context, resource): if context.is_admin and 'tenant_id' in resource: tenant_id = resource['tenant_id'] elif ('tenant_id' in resource and resource['tenant_id'] != context.tenant_id): reason = _('Cannot create resource for another tenant') raise n_exc.AdminRequired(reason=reason) else: tenant_id = context.tenant_id return tenant_id
def rest_get_testpath(self, src, dst):
    """Retrieve testpath results from the controller.

    Returns the parsed body on a 2xx reply, otherwise None.
    """
    target = TESTPATH_PATH % {'src-tenant': src['tenant'],
                              'src-segment': src['segment'],
                              'src-ip': src['ip'],
                              'dst-ip': dst['ip']}
    reply = self.rest_action(
        'GET', target,
        errstr=_("Unable to retrieve results for testpath ID: %s"),
        ignore_codes=[404])
    return reply[3] if reply[0] in range(200, 300) else None
def rest_update_network(self, tenant_id, net_id, network):
    """Push an updated network (and its subnets) to the controller."""
    self._check_and_raise_exception_unsupported_name(
        ObjTypeEnum.network, network)
    # subnet names travel inside the network body, so vet each one too
    if 'subnets' in network:
        for subnet in network['subnets']:
            self._check_and_raise_exception_unsupported_name(
                ObjTypeEnum.subnet, subnet)
    self.rest_action('PUT', NETWORKS_PATH % (tenant_id, net_id),
                     {"network": network},
                     _("Unable to update remote network: %s"))
def request(self, action, uri, body, headers):
    """Intercept network updates and assert the floating IP payload
    spans multiple tenants, then delegate to the parent class.

    :raises Exception: when the request carries no floating IPs or all
        floating IPs belong to a single tenant
    """
    # Only handle network update requests
    if 'network' in uri and 'tenant' in uri and 'ports' not in uri:
        req = jsonutils.loads(body)
        if 'network' not in req or 'floatingips' not in req['network']:
            # bug fix: adjacent literals concatenated to "requesturi=";
            # add the missing separator
            msg = _("No floating IPs in request "
                    "uri=%(uri)s, body=%(body)s") % {'uri': uri,
                                                     'body': body}
            raise Exception(msg)
        distinct_tenants = []
        for flip in req['network']['floatingips']:
            if flip['tenant_id'] not in distinct_tenants:
                distinct_tenants.append(flip['tenant_id'])
        if len(distinct_tenants) < 2:
            # bug fix: same missing separator ("tenants.uri=")
            msg = _("Expected floating IPs from multiple tenants. "
                    "uri=%(uri)s, body=%(body)s") % {'uri': uri,
                                                     'body': body}
            raise Exception(msg)
    super(VerifyMultiTenantFloatingIP, self).request(action, uri, body,
                                                     headers)
def rest_update_network(self, tenant_id, net_id, network):
    """Send the updated network definition to the controller."""
    self._check_and_raise_exception_unsupported_name(ObjTypeEnum.network,
                                                     network)
    # the body may embed subnets; each subnet name is validated as well
    if 'subnets' in network:
        for subnet in network['subnets']:
            self._check_and_raise_exception_unsupported_name(
                ObjTypeEnum.subnet, subnet)
    target = NETWORKS_PATH % (tenant_id, net_id)
    self.rest_action('PUT', target, {"network": network},
                     _("Unable to update remote network: %s"))
def request(self, action, uri, body, headers):
    """Check multi-tenant floating IP payloads on network updates
    before delegating to the parent class.

    :raises Exception: when no floating IPs are present or they all
        belong to one tenant
    """
    # Only handle network update requests
    if 'network' in uri and 'tenant' in uri and 'ports' not in uri:
        req = jsonutils.loads(body)
        if 'network' not in req or 'floatingips' not in req['network']:
            # bug fix: adjacent literals concatenated to "requesturi=";
            # add the missing separator
            msg = _("No floating IPs in request "
                    "uri=%(uri)s, body=%(body)s") % {'uri': uri,
                                                     'body': body}
            raise Exception(msg)
        distinct_tenants = []
        for flip in req['network']['floatingips']:
            if flip['tenant_id'] not in distinct_tenants:
                distinct_tenants.append(flip['tenant_id'])
        if len(distinct_tenants) < 2:
            # bug fix: same missing separator ("tenants.uri=")
            msg = _("Expected floating IPs from multiple tenants. "
                    "uri=%(uri)s, body=%(body)s") % {'uri': uri,
                                                     'body': body}
            raise Exception(msg)
    super(VerifyMultiTenantFloatingIP, self).request(action, uri, body,
                                                     headers)
def _validate_nexthops(self, nexthops): msg = None seen = [] nexthops = nexthops.split(',') for ip in nexthops: if not ip: continue msg = validators.validate_ip_address(ip) if ip in seen: msg = _("Duplicate nexthop in rule '%s'") % ip return msg seen.append(ip) return msg
def rest_create_port(self, tenant_id, net_id, port):
    """Notify the controller about a port, skipping unattached ports."""
    resource = ATTACHMENT_PATH % (tenant_id, net_id, port["id"])
    device_id = port.get("device_id")
    if not port["mac_address"] or not device_id:
        # controller only cares about ports attached to devices
        LOG.warning("No device MAC attached to port %s. "
                    "Skipping notification to controller.", port["id"])
        return
    payload = {"port": port,
               "attachment": {"id": device_id,
                              "mac": port["mac_address"]}}
    self.rest_action('PUT', resource, payload,
                     _("Unable to create remote port: %s"))
def _rest_create_tenant(self, tenant_id):
    """Create a tenant on the controller after validating its name.

    :raises TenantIDNotFound: when the id is not in the keystone cache
    :raises UnsupportedNameException: when the name fails BCF rules
    """
    tenant_name = self.keystone_tenants.get(tenant_id)
    if not tenant_name:
        raise TenantIDNotFound(tenant=tenant_id)
    if not is_valid_bcf_name(tenant_name):
        raise UnsupportedNameException(obj_type=ObjTypeEnum.tenant,
                                       obj_id=tenant_id,
                                       obj_name=tenant_name)
    self.rest_action('POST', TENANT_RESOURCE_PATH,
                     {"tenant_id": tenant_id, 'tenant_name': tenant_name},
                     _("Unable to create tenant: %s"))
def rest_create_port(self, tenant_id, net_id, port):
    """Tell the controller about a port and its device attachment.

    Ports without a MAC address or device id are ignored — the
    controller only tracks ports attached to devices.
    """
    target = ATTACHMENT_PATH % (tenant_id, net_id, port["id"])
    device_id = port.get("device_id")
    if not port["mac_address"] or not device_id:
        LOG.warning("No device MAC attached to port %s. "
                    "Skipping notification to controller.", port["id"])
        return
    body = {"port": port,
            "attachment": {"id": device_id, "mac": port["mac_address"]}}
    self.rest_action('PUT', target, body,
                     _("Unable to create remote port: %s"))
def _rest_create_tenant(self, tenant_id):
    """Create *tenant_id* on the controller, checking the cached name.

    :raises TenantIDNotFound: when the id is not in the keystone cache
    :raises UnsupportedNameException: when the name fails BCF rules
    """
    tenant_name = self.keystone_tenants.get(tenant_id)
    if not tenant_name:
        raise TenantIDNotFound(tenant=tenant_id)
    if not is_valid_bcf_name(tenant_name):
        raise UnsupportedNameException(obj_type=ObjTypeEnum.tenant,
                                       obj_id=tenant_id,
                                       obj_name=tenant_name)
    payload = {"tenant_id": tenant_id, 'tenant_name': tenant_name}
    self.rest_action('POST', TENANT_RESOURCE_PATH, payload,
                     _("Unable to create tenant: %s"))
def _validate_uniquerules(rules): pairs = [] for r in rules: if ('source' not in r or 'destination' not in r or 'action' not in r or 'priority' not in r): continue pairs.append( (r['source'], r['destination'], r['action'], r['priority'])) if len(set(pairs)) != len(pairs): error = _("Duplicate router rules (src,dst,action,priority) " "found '%s'") % pairs LOG.debug(error) raise nexception.InvalidInput(error_message=error)
def _validate_uniquerules(rules): pairs = [] for r in rules: if ('source' not in r or 'destination' not in r or 'action' not in r or 'priority' not in r): continue pairs.append((r['source'], r['destination'], r['action'], r['priority'])) if len(set(pairs)) != len(pairs): error = _("Duplicate router rules (src,dst,action,priority) " "found '%s'") % pairs LOG.debug(error) raise nexception.InvalidInput(error_message=error)
def _tenantpolicy_updateable_args(parser): parser.add_argument('source', help=_('Source for the policy.')) parser.add_argument('-source_port', help=_('Source port for the policy. Optional.')) parser.add_argument('destination', help=_('Destination for the policy.')) parser.add_argument('-destination_port', help=_('Destination port for the policy. Optional.')) parser.add_argument( 'action', help=_('Action for matching traffic - permit or deny.')) parser.add_argument('-protocol', help=_('Protocol for matching traffic when specifying ' 'source or destination port.')) parser.add_argument('-nexthops', help=_('Optional nexthops.'))
class UnsupportedNameException(exceptions.NeutronException):
    """UnsupportedNameException

    Exception class to be raised when encountering object names with
    unsupported names. Namely those that do not conform to the regular
    expression BCF_IDENTIFIER_UUID_RE

    :keyword obj_type
    :keyword obj_id
    :keyword obj_name
    """
    # interpolated by the NeutronException base with the keyword
    # arguments supplied at raise time
    message = _("Object of type %(obj_type)s and id %(obj_id)s has unsupported"
                " character in name \"%(obj_name)s\". It should begin with an"
                " alphanumeric character [0-9a-zA-Z] and can contain space,"
                " underscore, apostrophe, forward slash, opening and closing"
                " square brackets.")
    # status left unset; may be filled in by callers
    status = None
def _fetch_and_store_cert(self, server, port, path):
    """_fetch_and_store_cert
    Grabs a certificate from a server and writes it to a given path.

    :returns: the PEM-encoded certificate string
    :raises cfg.Error: when the certificate cannot be retrieved
    """
    try:
        # NOTE(review): PROTOCOL_TLSv1 pins the handshake to TLS 1.0,
        # which is legacy — confirm controller requirements before
        # changing
        cert = ssl.get_server_certificate((server, port),
                                          ssl_version=ssl.PROTOCOL_TLSv1)
    except Exception as e:
        raise cfg.Error(_('Could not retrieve initial '
                          'certificate from controller %(server)s. '
                          'Error details: %(error)s') %
                        {'server': server, 'error': e})
    # NOTE(review): message reads "Storing to certificate" — likely
    # meant "Storing certificate"; left unchanged here
    LOG.warning("Storing to certificate for host %(server)s "
                "at %(path)s", {'server': server,
                                'path': path})
    self._file_put_contents(path, cert)
    return cert
def convert_to_valid_router_rules(data):
    """Validates and converts router rules to the appropriate data structure
    Example argument = [{'source': 'any', 'destination': 'any',
                         'action':'deny'},
                        {'source': '1.1.1.1/32', 'destination': 'external',
                         'action':'permit',
                         'nexthops': ['1.1.1.254', '1.1.1.253']}]
    """
    V4ANY = '0.0.0.0/0'
    CIDRALL = ['any', 'external']
    if not isinstance(data, list):
        emsg = _("Invalid data format for router rule: '%s'") % data
        LOG.debug(emsg)
        raise nexception.InvalidInput(error_message=emsg)
    _validate_uniquerules(data)
    rules = []
    expected_keys = ['source', 'destination', 'action', 'priority']
    for rule in data:
        # normalize nexthops to a list; rules are updated in place so
        # the caller sees the normalized form too
        rule['nexthops'] = rule.get('nexthops', [])
        if not isinstance(rule['nexthops'], list):
            rule['nexthops'] = rule['nexthops'].split('+')
        src = V4ANY if rule['source'] in CIDRALL else rule['source']
        dst = (V4ANY if rule['destination'] in CIDRALL
               else rule['destination'])
        checks = [validators._verify_dict_keys(expected_keys, rule, False),
                  validators.validate_subnet(dst),
                  validators.validate_subnet(src),
                  _validate_nexthops(rule['nexthops']),
                  _validate_action(rule['action']),
                  _validate_priority(rule['priority'])]
        failures = [m for m in checks if m]
        if failures:
            LOG.debug(failures)
            raise nexception.InvalidInput(error_message=failures)
        rules.append(rule)
    return rules
def __init__(self, timeout=False, base_uri=BASE_URI,
             name='NeutronRestProxy'):
    """Build the pool of backend REST servers from configuration.

    :param timeout: per-request timeout; False means use the
        configured RESTPROXY.server_timeout
    :param base_uri: URI prefix for all REST calls
    :param name: label used in log messages
    :raises cfg.Error: when no servers are configured or an entry is
        not of the form <ip>:<port>
    """
    LOG.debug("ServerPool: initializing")
    # 'servers' is the list of network controller REST end-points
    # (used in order specified till one succeeds, and it is sticky
    # till next failure). Use 'server_auth' to encode api-key
    servers = cfg.CONF.RESTPROXY.servers
    self.auth = cfg.CONF.RESTPROXY.server_auth
    self.ssl = cfg.CONF.RESTPROXY.server_ssl
    self.neutron_id = cfg.CONF.RESTPROXY.neutron_id
    # prefer credentials from the standard keystone_authtoken section
    if 'keystone_authtoken' in cfg.CONF:
        self.auth_user = get_keystoneauth_cfg(cfg.CONF, 'username')
        self.auth_password = get_keystoneauth_cfg(cfg.CONF, 'password')
        self.auth_url = get_keystoneauth_cfg(cfg.CONF, 'auth_url')
        self.auth_tenant = get_keystoneauth_cfg(cfg.CONF, 'project_name')
        self.project_domain_name = get_keystoneauth_cfg(
            cfg.CONF, 'project_domain_name')
        self.user_domain_name = get_keystoneauth_cfg(
            cfg.CONF, 'user_domain_name')
    else:
        # this is for UT only
        LOG.warning("keystone_authtoken not found in "
                    "/etc/neutron/neutron.conf. "
                    "Please check config file")
        self.auth_url = cfg.CONF.RESTPROXY.auth_url
        self.auth_user = cfg.CONF.RESTPROXY.auth_user
        self.auth_password = cfg.CONF.RESTPROXY.auth_password
        self.auth_tenant = cfg.CONF.RESTPROXY.auth_tenant
        self.project_domain_name = KS_AUTH_DOMAIN_DEFAULT
        self.user_domain_name = KS_AUTH_DOMAIN_DEFAULT
    # Use Keystonev3 URL for authentication
    if "v2.0" in self.auth_url:
        self.auth_url = self.auth_url.replace("v2.0", "v3")
    elif "v3" not in self.auth_url:
        self.auth_url = "%s/v3" % self.auth_url
    self.base_uri = base_uri
    self.name = name
    self.contexts = {}
    # Cache for Openstack projects
    # The cache is maintained in a separate thread and sync'ed with
    # Keystone periodically.
    self.keystone_tenants = {}
    self._update_tenant_cache(reconcile=False)
    self.timeout = cfg.CONF.RESTPROXY.server_timeout
    self.always_reconnect = not cfg.CONF.RESTPROXY.cache_connections
    default_port = 8000
    if timeout is not False:
        self.timeout = timeout
    # Function to use to retrieve topology for consistency syncs.
    # Needs to be set by module that uses the servermanager.
    self.get_topo_function = None
    self.get_topo_function_args = {}
    if not servers:
        raise cfg.Error(_('Servers not defined. Aborting server manager.'))
    # append the default port to any entry that lacks an explicit one
    servers = [s if len(s.rsplit(':', 1)) == 2
               else "%s:%d" % (s, default_port)
               for s in servers]
    if any((len(spl) != 2 or not spl[1].isdigit())
           for spl in [sp.rsplit(':', 1) for sp in servers]):
        raise cfg.Error(_('Servers must be defined as <ip>:<port>. '
                          'Configuration was %s') % servers)
    self.servers = []
    for s in servers:
        server, port = s.rsplit(':', 1)
        if server.startswith("[") and server.endswith("]"):
            # strip [] for ipv6 address
            server = server[1:-1]
        self.servers.append(self.server_proxy_for(server, int(port)))
    self.start_background_tasks()
    ServerPool._instance = self
    LOG.debug("ServerPool: initialization done")
def rest_add_router_interface(self, tenant_id, router_id, intf_details):
    """Attach an interface to a router on the backend controller."""
    self.rest_action('POST', ROUTER_INTF_OP_PATH % (tenant_id, router_id),
                     {"interface": intf_details},
                     _("Unable to add router interface: %s"))
def rest_delete_floatingip(self, tenant_id, oldid):
    """Remove a floating IP from the backend controller."""
    self.rest_action('DELETE', FLOATINGIPS_PATH % (tenant_id, oldid),
                     errstr=_("Unable to delete floating IP: %s"))
def rest_call(self, action, resource, data, headers, ignore_codes,
              timeout=False):
    """Issue a REST call against the server pool with failover.

    Servers are tried in least-recently-failed order. A 503 is
    retried on the same server; a 409 (hash conflict) while holding
    the DB lock triggers a full topology sync. Returns the response
    tuple of the first successful server, or the first server's
    response when all of them fail.

    :raises cfg.Error: when a sync is required but no topology
        function is configured
    :raises RemoteRestError: when the topology sync itself fails
    """
    context = self.get_context_ref()
    if context:
        # include the requesting context information if available
        cdict = context.to_dict()
        # remove the auth token so it's not present in debug logs on the
        # backend controller
        cdict.pop('auth_token', None)
        if ('tenant_name' in cdict and cdict['tenant_name']):
            cdict['tenant_name'] = Util.format_resource_name(
                cdict['tenant_name'])
        headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
    hash_handler = cdb.HashHandler()
    # try healthy servers before ones that failed recently
    good_first = sorted(self.servers, key=lambda x: x.failed)
    first_response = None
    for active_server in good_first:
        LOG.debug("ServerProxy: %(action)s to servers: "
                  "%(server)r, %(resource)s",
                  {'action': action,
                   'server': (active_server.server,
                              active_server.port),
                   'resource': resource})
        # retry this server while it reports 503 Service Unavailable
        for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
            ret = active_server.rest_call(action, resource, data, headers,
                                          timeout,
                                          reconnect=self.always_reconnect,
                                          hash_handler=hash_handler)
            if ret[0] != httplib.SERVICE_UNAVAILABLE:
                break
            time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)
        # If inconsistent, do a full synchronization
        if ret[0] == httplib.CONFLICT and hash_handler.is_db_lock_owner():
            if not self.get_topo_function:
                raise cfg.Error(_('Server requires synchronization, '
                                  'but no topology function was defined.'))
            LOG.info("ServerProxy: HashConflict detected with request "
                     "%(action)s %(resource)s Starting Topology sync",
                     {'action': action, 'resource': resource})
            topo_hh = self.dblock_mark_toposync_started(hash_handler)
            try:
                data = self.get_topo_function(
                    **self.get_topo_function_args)
                if data:
                    ret_ts = active_server.rest_call('POST', TOPOLOGY_PATH,
                                                     data, timeout=None,
                                                     hash_handler=topo_hh)
                    if self.server_failure(ret_ts, ignore_codes):
                        LOG.error("ServerProxy: Topology sync failed")
                        raise RemoteRestError(reason=ret_ts[2],
                                              status=ret_ts[0])
            finally:
                LOG.info("ServerProxy: Topology sync completed")
                # no topology data available — nothing more to do
                if data is None:
                    return None
        elif ret[0] == httplib.CONFLICT and \
                not hash_handler.is_db_lock_owner():
            # DB lock ownership lost, allow current owner to detect hash
            # conflict and perform needed TopoSync
            LOG.warning("HashConflict detected but thread is no longer"
                        " DB lock owner. Skipping TopoSync call")
        # Store the first response as the error to be bubbled up to the
        # user since it was a good server. Subsequent servers will most
        # likely be cluster slaves and won't have a useful error for the
        # user (e.g. 302 redirect to master)
        if not first_response:
            first_response = ret
        if not self.server_failure(ret, ignore_codes):
            active_server.failed = False
            LOG.debug("ServerProxy: %(action)s succeed for servers: "
                      "%(server)r Response: %(response)s",
                      {'action': action,
                       'server': (active_server.server,
                                  active_server.port),
                       'response': ret[3]})
            return ret
        else:
            LOG.warning('ServerProxy: %(action)s failure for servers:'
                        '%(server)r Response: %(response)s',
                        {'action': action,
                         'server': (active_server.server,
                                    active_server.port),
                         'response': ret[3]})
            LOG.warning("ServerProxy: Error details: "
                        "status=%(status)d, reason=%(reason)r, "
                        "ret=%(ret)s, data=%(data)r",
                        {'status': ret[0], 'reason': ret[1],
                         'ret': ret[2], 'data': ret[3]})
            active_server.failed = True
    # A failure on a delete means the object is gone from Neutron but not
    # from the controller. Set the consistency hash to a bad value to
    # trigger a sync on the next check.
    # NOTE: The hash must have a comma in it otherwise it will be ignored
    # by the backend.
    if action == 'DELETE':
        hash_handler.put_hash('INCONSISTENT,INCONSISTENT')
    # All servers failed, reset server list and try again next time
    LOG.error('ServerProxy: %(action)s failure for all servers: '
              '%(server)r',
              {'action': action,
               'server': tuple((s.server,
                                s.port) for s in self.servers)})
    return first_response
def rest_create_floatingip(self, tenant_id, floatingip):
    """Create a floating IP on the backend controller."""
    target = FLOATINGIPS_PATH % (tenant_id, floatingip['id'])
    # make sure the tenant is known to the controller before the PUT
    self._ensure_tenant_cache(tenant_id)
    self.rest_action('PUT', target, floatingip,
                     errstr=_("Unable to create floating IP: %s"))
def rest_update_floatingip(self, tenant_id, floatingip, oldid):
    """Update floating IP *oldid* on the backend controller."""
    self.rest_action('PUT', FLOATINGIPS_PATH % (tenant_id, oldid),
                     floatingip,
                     errstr=_("Unable to update floating IP: %s"))
def rest_delete_tenant(self, tenant_id):
    """Delete a tenant from the backend controller."""
    self.rest_action('DELETE', TENANT_PATH % tenant_id,
                     errstr=_("Unable to delete tenant: %s"))
def rest_delete_port(self, tenant_id, network_id, port_id):
    """Delete a port attachment from the backend controller."""
    self.rest_action('DELETE',
                     ATTACHMENT_PATH % (tenant_id, network_id, port_id),
                     errstr=_("Unable to delete remote port: %s"))
def rest_get_port(self, tenant_id, net_id, port_id):
    """Look up a port attachment, returning None on 404."""
    target = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
    reply = self.rest_action('GET', target,
                             errstr=_("Unable to retrieve port: %s"),
                             ignore_codes=[404])
    return reply[3] if reply[0] != 404 else None
def rest_delete_securitygroup(self, sg_id):
    """Delete a security group from the backend controller."""
    self.rest_action('DELETE', SECURITY_GROUP_PATH % sg_id,
                     errstr=_("Unable to delete security group: %s"))
def rest_delete_router(self, tenant_id, router_id):
    """Delete a router from the backend controller."""
    self.rest_action('DELETE', ROUTERS_PATH % (tenant_id, router_id),
                     errstr=_("Unable to delete remote router: %s"))
def rest_delete_network(self, tenant_id, net_id):
    """Delete a network from the backend controller."""
    self.rest_action('DELETE', NETWORKS_PATH % (tenant_id, net_id),
                     errstr=_("Unable to delete remote network: %s"))
def rest_remove_router_interface(self, tenant_id, router_id, interface_id):
    """Detach an interface from a router on the backend controller."""
    target = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id)
    self.rest_action('DELETE', target,
                     errstr=_("Unable to delete remote intf: %s"))