def _delete_port_group(self, task):
    try:
        header, response = self.vcns.get_edge_id(task.userdata['job_id'])
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("NSXv: Failed to get job for %s"),
                      task.userdata)
    status = response['status']
    if status != 'COMPLETED':
        if (status == 'QUEUED' or status == 'RUNNING' or
                status == 'ROLLBACK'):
            LOG.debug("NSXv: job is still pending for %s", task.userdata)
            return task_constants.TaskStatus.PENDING
    try:
        self.vcns.delete_port_group(
            task.userdata['dvs_id'],
            task.userdata['port_group_id'])
    except Exception as e:
        LOG.error(_LE('Unable to delete %(pg)s (job status %(state)s) '
                      'exception %(ex)s'),
                  {'pg': task.userdata['port_group_id'],
                   'state': status,
                   'ex': e})
    if status == 'FAILED':
        return task_constants.TaskStatus.ERROR
    return task_constants.TaskStatus.COMPLETED
def update_vip(self, context, old_vip, vip, pool_mapping, vip_mapping):
    LOG.debug('Updating VIP %s to %s', old_vip, vip)

    edge_id = vip_mapping['edge_id']
    edge_vip_id = vip_mapping['edge_vse_id']
    app_profile_id = vip_mapping['edge_app_profile_id']
    app_profile = convert_lbaas_app_profile(
        vip['name'], vip.get('session_persistence', {}),
        vip.get('protocol'))

    try:
        with locking.LockManager.get_lock(edge_id):
            self.vcns.update_app_profile(edge_id, app_profile_id,
                                         app_profile)
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv1_driver.vip_failed(context, vip)
            LOG.error(_LE('Failed to update app profile on edge: %s'),
                      edge_id)

    edge_vip = convert_lbaas_vip(vip, app_profile_id, pool_mapping)
    try:
        with locking.LockManager.get_lock(edge_id):
            self.vcns.update_vip(edge_id, edge_vip_id, edge_vip)
        self.lbv1_driver.vip_successful(context, vip)
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv1_driver.vip_failed(context, vip)
            LOG.error(_LE('Failed to update vip on edge: %s'), edge_id)
def check_edge_jobs(self, edge_id):
    retries = max(cfg.CONF.nsxv.retries, 1)
    delay = 0.5
    for attempt in range(1, retries + 1):
        if attempt != 1:
            time.sleep(delay)
            delay = min(2 * delay, 60)
        h, jobs = self.vcns.get_edge_jobs(edge_id)
        if jobs['edgeJob'] == []:
            return
        job_number = len(jobs['edgeJob'])
        # Assume a job times out after 20 minutes and takes about one
        # minute to complete.
        if job_number < 20:
            LOG.warning(_LW("NSXv: %(num)s jobs still running on edge "
                            "%(edge_id)s."),
                        {'num': job_number, 'edge_id': edge_id})
        else:
            LOG.error(_LE("NSXv: %(num)s jobs still running on edge "
                          "%(edge_id)s. Too many jobs may lead to job "
                          "timeout at the backend"),
                      {'num': job_number, 'edge_id': edge_id})
    LOG.error(_LE('NSXv: jobs are still running!'))
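# Hedged, self-contained sketch of the capped exponential backoff used by
# check_edge_jobs() above. poll() and is_done() are hypothetical stand-ins
# for vcns.get_edge_jobs() and the empty-job check; they are not part of
# the NSXv API.
import time


def wait_with_backoff(poll, is_done, retries=10,
                      initial_delay=0.5, max_delay=60):
    delay = initial_delay
    for attempt in range(1, retries + 1):
        if attempt != 1:
            time.sleep(delay)
            # double the delay on every retry, capped at max_delay seconds
            delay = min(2 * delay, max_delay)
        result = poll()
        if is_done(result):
            return result
    # give up after the last attempt; the caller decides how to report it
    return None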
def _status_edge(self, task):
    edge_id = task.userdata['edge_id']
    try:
        response = self.vcns.get_edge_deploy_status(edge_id)[1]
        task.userdata['retries'] = 0
        system_status = response.get('systemStatus', None)
        if system_status is None:
            status = task_constants.TaskStatus.PENDING
        elif system_status == 'good':
            status = task_constants.TaskStatus.COMPLETED
        else:
            status = task_constants.TaskStatus.ERROR
    except exceptions.VcnsApiException as e:
        LOG.exception(_LE("VCNS: Edge %s status query failed."), edge_id)
        raise e
    except Exception:
        retries = task.userdata.get('retries', 0) + 1
        if retries < 3:
            task.userdata['retries'] = retries
            LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
                              "status. Retry %(retries)d."),
                          {'edge_id': edge_id,
                           'retries': retries})
            status = task_constants.TaskStatus.PENDING
        else:
            LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
                              "Abort."), edge_id)
            status = task_constants.TaskStatus.ERROR
    LOG.debug("VCNS: Edge %s status", edge_id)
    return status
def _reconfigure_port_group(self, pg_moref, spec_update_callback,
                            spec_update_data):
    # Get the current configuration of the port group
    pg_spec = self._session.invoke_api(vim_util,
                                       'get_object_properties',
                                       self._session.vim,
                                       pg_moref, ['config'])
    if len(pg_spec) == 0 or len(pg_spec[0].propSet[0]) == 0:
        LOG.error(_LE('Failed to get object properties of %s'), pg_moref)
        raise nsx_exc.DvsNotFound(dvs=pg_moref)

    # Convert the extracted config to DVPortgroupConfigSpec
    new_spec = self._copy_port_group_spec(pg_spec[0].propSet[0].val)

    # Update the configuration using the callback & data
    spec_update_callback(new_spec, spec_update_data)

    # Update the port group configuration
    task = self._session.invoke_api(self._session.vim,
                                    'ReconfigureDVPortgroup_Task',
                                    pg_moref, spec=new_spec)
    try:
        self._session.wait_for_task(task)
    except Exception:
        LOG.error(_LE('Failed to reconfigure DVPortGroup %s'), pg_moref)
        raise nsx_exc.DvsNotFound(dvs=pg_moref)
def nsx_clean_spoofguard_policy(resource, event, trigger, **kwargs):
    """Delete spoofguard policy"""
    errmsg = ("Need to specify policy-id. Add --property "
              "policy-id=<policy-id>")
    if not kwargs.get('property'):
        LOG.error(_LE("%s"), errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    policy_id = properties.get('policy-id')
    if not policy_id:
        LOG.error(_LE("%s"), errmsg)
        return
    try:
        nsxv.get_spoofguard_policy(policy_id)
    except exceptions.NeutronException as e:
        LOG.error(_LE("Unable to retrieve policy %(p)s: %(e)s"),
                  {'p': policy_id, 'e': str(e)})
    else:
        confirm = admin_utils.query_yes_no(
            "Do you want to delete spoofguard-policy: %s" % policy_id,
            default="no")
        if not confirm:
            LOG.info(_LI("spoofguard-policy deletion aborted by user"))
            return
        try:
            nsxv.delete_spoofguard_policy(policy_id)
        except Exception as e:
            LOG.error(_LE("%s"), str(e))
        else:
            # only report success when the delete actually succeeded
            LOG.info(_LI('spoofguard-policy successfully deleted.'))
def nsx_update_switch(resource, event, trigger, **kwargs):
    nsxv = utils.get_nsxv_client()
    if not kwargs.get('property'):
        LOG.error(_LE("Need to specify dvs-id parameter and "
                      "attribute to update. Add --property dvs-id=<dvs-id> "
                      "--property teamingpolicy=<policy>"))
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    dvs_id = properties.get('dvs-id')
    if not dvs_id:
        LOG.error(_LE("Need to specify dvs-id. "
                      "Add --property dvs-id=<dvs-id>"))
        return
    h, switch = nsxv.get_vdn_switch(dvs_id)
    policy = properties.get('teamingpolicy')
    if policy:
        if switch['teamingPolicy'] == policy:
            LOG.info(_LI("Policy already set!"))
            return
        LOG.info(_LI("Updating NSXv switch %(dvs)s teaming policy to "
                     "%(policy)s"), {'dvs': dvs_id, 'policy': policy})
        switch['teamingPolicy'] = policy
        switch = nsxv.update_vdn_switch(switch)
        LOG.info(_LI("Switch value after update: %s"), switch)
    else:
        LOG.error(_LE("No teaming policy set. "
                      "Add --property teamingpolicy=<policy>"))
        LOG.info(_LI("Current switch value is: %s"), switch)
def create(self, context, listener, certificate=None):
    default_pool = None

    lb_id = listener.loadbalancer_id
    lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
        context.session, lb_id)
    edge_id = lb_binding['edge_id']

    if listener.default_pool and listener.default_pool.id:
        pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
            context.session, lb_id, listener.id, listener.default_pool.id)
        if pool_binding:
            default_pool = pool_binding['edge_pool_id']

    edge_cert_id = None
    if certificate:
        try:
            edge_cert_id = self._upload_certificate(
                context, edge_id, listener.default_tls_container_id,
                certificate)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.lbv2_driver.listener.failed_completion(context,
                                                            listener)

    app_profile = listener_to_edge_app_profile(listener, edge_cert_id)
    app_profile_id = None

    try:
        with locking.LockManager.get_lock(edge_id):
            h = (self.vcns.create_app_profile(edge_id, app_profile))[0]
            app_profile_id = lb_common.extract_resource_id(h['location'])
    except vcns_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv2_driver.listener.failed_completion(context, listener)
            LOG.error(_LE('Failed to create app profile on edge: %s'),
                      lb_binding['edge_id'])

    vse = listener_to_edge_vse(listener, lb_binding['vip_address'],
                               default_pool, app_profile_id)

    try:
        with locking.LockManager.get_lock(edge_id):
            h = self.vcns.create_vip(edge_id, vse)[0]
            edge_vse_id = lb_common.extract_resource_id(h['location'])

        nsxv_db.add_nsxv_lbaas_listener_binding(context.session,
                                                lb_id, listener.id,
                                                app_profile_id,
                                                edge_vse_id)
        self.lbv2_driver.listener.successful_completion(context, listener)
    except vcns_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv2_driver.listener.failed_completion(context, listener)
            LOG.error(_LE('Failed to create vip on Edge: %s'), edge_id)
            self.vcns.delete_app_profile(edge_id, app_profile_id)
def _validate_resource_choice(resource, nsx_plugin):
    if nsx_plugin == 'nsxv' and resource not in nsxv_resources:
        LOG.error(_LE('Supported list of NSX-V resources: %s'),
                  nsxv_resources_names)
        sys.exit(1)
    elif nsx_plugin == 'nsxv3' and resource not in nsxv3_resources:
        LOG.error(_LE('Supported list of NSX-V3 resources: %s'),
                  nsxv3_resources_names)
        sys.exit(1)
def create(self, context, hm):
    listener = hm.pool.listener
    lb_id = listener.loadbalancer_id
    lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
        context.session, lb_id)
    pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
        context.session, lb_id, listener.id, hm.pool.id)

    edge_id = lb_binding['edge_id']
    edge_pool_id = pool_binding['edge_pool_id']

    hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding(
        context.session, lb_id, listener.id, hm.pool.id, hm.id, edge_id)
    edge_mon_id = None

    if hm_binding:
        edge_mon_id = hm_binding['edge_mon_id']
    else:
        edge_monitor = self._convert_lbaas_monitor(hm)
        try:
            with locking.LockManager.get_lock(edge_id):
                h = self.vcns.create_health_monitor(edge_id,
                                                    edge_monitor)[0]
                edge_mon_id = lb_common.extract_resource_id(h['location'])

            nsxv_db.add_nsxv_lbaas_monitor_binding(
                context.session, lb_id, listener.id, hm.pool.id, hm.id,
                edge_id, edge_mon_id)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv2_driver.health_monitor.failed_completion(
                    context, hm)
                LOG.error(_LE('Failed to create health monitor on edge: '
                              '%s'), edge_id)

    try:
        # Associate monitor with Edge pool
        with locking.LockManager.get_lock(edge_id):
            edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]
            if edge_pool.get('monitorId'):
                edge_pool['monitorId'].append(edge_mon_id)
            else:
                edge_pool['monitorId'] = [edge_mon_id]

            self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv2_driver.health_monitor.failed_completion(context, hm)
            LOG.error(_LE('Failed to associate monitor with pool on '
                          'edge: %s'), edge_id)

    self.lbv2_driver.health_monitor.successful_completion(context, hm)
def change_edge_ha(properties):
    ha = properties.get('highavailability').lower() == "true"
    request = {
        'featureType': 'highavailability_4.0',
        'enabled': ha}
    try:
        nsxv.enable_ha(properties.get('edge-id'), request, async=False)
    except nsxv_exceptions.ResourceNotFound:
        LOG.error(_LE("Edge %s not found"), properties.get('edge-id'))
    except exceptions.NeutronException as e:
        LOG.error(_LE("%s"), str(e))
def create_pool_health_monitor(self, context, health_monitor, pool_id,
                               pool_mapping, mon_mappings):
    LOG.debug('Create HM %s', health_monitor)

    edge_mon_id = None
    with locking.LockManager.get_lock(pool_mapping['edge_id']):
        # First, check whether this monitor already exists on the same
        # Edge appliance for another pool. If no pool on this Edge is
        # associated with this monitor yet, create it on the Edge.
        if mon_mappings:
            edge_mon_id = mon_mappings['edge_monitor_id']
        else:
            edge_monitor = convert_lbaas_monitor(health_monitor)
            try:
                h = self.vcns.create_health_monitor(
                    pool_mapping['edge_id'], edge_monitor)[0]
                edge_mon_id = lb_common.extract_resource_id(h['location'])
            except nsxv_exc.VcnsApiException:
                self.lbv1_driver.pool_health_monitor_failed(
                    context, health_monitor, pool_id)
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE('Failed to create monitor on edge: %s'),
                        pool_mapping['edge_id'])

        try:
            # Associate monitor with Edge pool
            edge_pool = self.vcns.get_pool(
                pool_mapping['edge_id'],
                pool_mapping['edge_pool_id'])[1]
            if edge_pool.get('monitorId'):
                edge_pool['monitorId'].append(edge_mon_id)
            else:
                edge_pool['monitorId'] = [edge_mon_id]

            self.vcns.update_pool(pool_mapping['edge_id'],
                                  pool_mapping['edge_pool_id'],
                                  edge_pool)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv1_driver.pool_health_monitor_failed(
                    context, health_monitor, pool_id)
                LOG.error(
                    _LE('Failed to associate monitor on edge: %s'),
                    pool_mapping['edge_id'])

    self.lbv1_driver.create_pool_health_monitor_successful(
        context, health_monitor, pool_id, pool_mapping['edge_id'],
        edge_mon_id)
def change_edge_appliance_size(properties):
    size = properties.get('size')
    if size not in nsxv_constants.ALLOWED_EDGE_SIZES:
        LOG.error(_LE("Edge appliance size must be one of %(size)s"),
                  {'size': nsxv_constants.ALLOWED_EDGE_SIZES})
        return
    try:
        nsxv.change_edge_appliance_size(
            properties.get('edge-id'), size)
    except nsxv_exceptions.ResourceNotFound:
        LOG.error(_LE("Edge %s not found"), properties.get('edge-id'))
    except exceptions.NeutronException as e:
        LOG.error(_LE("%s"), str(e))
def request(self, method, url, body="", content_type="application/json"):
    '''Issues request to controller.'''

    g = eventlet_request.GenericRequestEventlet(
        self, method, url, body, content_type, auto_login=True,
        http_timeout=self._http_timeout,
        retries=self._retries, redirects=self._redirects)
    g.start()
    response = g.join()
    LOG.debug('Request returns "%s"', response)

    # response is a modified HTTPResponse object or None.
    # response.read() will not work on response as the underlying library
    # request_eventlet.ApiRequestEventlet has already called this
    # method in order to extract the body and headers for processing.
    # ApiRequestEventlet derived classes call .read() and
    # .getheaders() on the HTTPResponse objects and store the results in
    # the response object's .body and .headers data members for future
    # access.

    if response is None:
        # Timeout.
        LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
                  {'method': method, 'url': url})
        raise exception.RequestTimeout()

    status = response.status
    if status == httplib.UNAUTHORIZED:
        raise exception.UnAuthorizedRequest()

    # Fail-fast: Check for exception conditions and raise the
    # appropriate exceptions for known error codes.
    if status in exception.ERROR_MAPPINGS:
        LOG.error(_LE("Received error code: %s"), status)
        LOG.error(_LE("Server Error Message: %s"), response.body)
        exception.ERROR_MAPPINGS[status](response)

    # Continue processing for non-error condition.
    if (status != httplib.OK and status != httplib.CREATED
            and status != httplib.NO_CONTENT):
        LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
                      "%(status)d (content = '%(body)s')"),
                  {'method': method, 'url': url,
                   'status': response.status, 'body': response.body})
        return None

    if not self._version:
        self._version = version.find_version(response.headers)
    return response.body
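# Illustrative sketch of the fail-fast dispatch pattern used by request()
# above: a dict maps known HTTP error codes to callables that raise the
# matching exception. The exception classes and mapping below are local
# placeholders, not the real vmware_nsx exception module.
class ForbiddenError(Exception):
    pass


class NotFoundError(Exception):
    pass


def _raise_forbidden(response):
    raise ForbiddenError(response)


def _raise_not_found(response):
    raise NotFoundError(response)


ERROR_MAPPINGS = {403: _raise_forbidden, 404: _raise_not_found}


def check_status(status, response):
    # raise immediately for known error codes, otherwise keep processing
    if status in ERROR_MAPPINGS:
        ERROR_MAPPINGS[status](response)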
def update(self, context, old_pool, new_pool):
    edge_pool = {
        'name': 'pool_' + new_pool.id,
        'description': getattr(new_pool, 'description',
                               getattr(new_pool, 'name')),
        'algorithm': lb_const.BALANCE_MAP.get(
            new_pool.lb_algorithm, 'round-robin'),
        'transparent': False
    }

    listener = new_pool.listener
    lb_id = listener.loadbalancer_id
    lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
        context.session, lb_id)
    pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
        context.session, lb_id, listener.id, new_pool.id)

    edge_id = lb_binding['edge_id']
    edge_pool_id = pool_binding['edge_pool_id']

    try:
        with locking.LockManager.get_lock(edge_id):
            self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)
        self.lbv2_driver.pool.successful_completion(context, new_pool)
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv2_driver.pool.failed_completion(context, new_pool)
            # new_pool is an object, not a dict, so use attribute access
            LOG.error(_LE('Failed to update pool %s'), new_pool.id)
def run(self):
    while True:
        try:
            if self._stopped:
                # Gracefully terminate this thread if the _stopped
                # attribute was set to true
                LOG.info(_LI("Stopping TaskManager"))
                break

            # get a task from queue, or timeout for periodic status check
            task = self._get_task()
            if task.resource_id in self._tasks:
                # this resource already has some tasks under processing,
                # append the task to same queue for ordered processing
                self._enqueue(task)
                continue

            try:
                self._main_thread_exec_task = task
                self._execute(task)
            finally:
                self._main_thread_exec_task = None
                if task.status is None:
                    # The thread was killed during _execute(). To guarantee
                    # the task has been aborted correctly, put it back into
                    # the queue.
                    self._enqueue(task)
                elif task.status != constants.TaskStatus.PENDING:
                    self._result(task)
                else:
                    self._enqueue(task)
        except Exception:
            LOG.exception(_LE("TaskManager terminating because "
                              "of an exception"))
            break
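# Hedged sketch of the per-resource ordering that run() above relies on:
# tasks sharing a resource_id execute serially, while tasks for different
# resources may interleave. The queue type below is a simplified stand-in
# for the real TaskManager internals, not the actual implementation.
import collections


class OrderedTaskQueues(object):
    def __init__(self):
        self._queues = collections.defaultdict(collections.deque)

    def enqueue(self, task):
        # tasks for the same resource line up behind each other
        self._queues[task.resource_id].append(task)

    def next_for(self, resource_id):
        # pop the next task for this resource, if any
        queue = self._queues.get(resource_id)
        return queue.popleft() if queue else None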
def _build_gateway_device_body(tenant_id, display_name, neutron_id,
                               connector_type, connector_ip,
                               client_certificate, tz_uuid):

    connector_type_mappings = {
        utils.NetworkTypes.STT: "STTConnector",
        utils.NetworkTypes.GRE: "GREConnector",
        utils.NetworkTypes.BRIDGE: "BridgeConnector",
        'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
        'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE",
        'ipsec_%s' % utils.NetworkTypes.STT: "IPsecSTT",
        'ipsec_%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
    nsx_connector_type = connector_type_mappings.get(connector_type)
    if connector_type and not nsx_connector_type:
        LOG.error(_LE("There is no NSX mapping for connector type %s"),
                  connector_type)
        raise nsx_exc.InvalidTransportType(transport_type=connector_type)

    body = {"display_name": utils.check_and_truncate(display_name),
            "tags": utils.get_tags(os_tid=tenant_id,
                                   q_gw_dev_id=neutron_id),
            "admin_status_enabled": True}

    if connector_ip and nsx_connector_type:
        body["transport_connectors"] = [
            {"transport_zone_uuid": tz_uuid,
             "ip_address": connector_ip,
             "type": nsx_connector_type}]

    if client_certificate:
        body["credential"] = {"client_certificate":
                              {"pem_encoded": client_certificate},
                              "type": "SecurityCertificateCredential"}
    return body
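# Assumed usage example for _build_gateway_device_body() above; all argument
# values are invented for illustration, and it assumes NetworkTypes.STT is
# the literal 'stt'. With an 'stt' connector type the mapping table should
# yield an "STTConnector" transport connector entry:
#
#     body = _build_gateway_device_body(
#         tenant_id='tenant-1', display_name='gw-dev-1', neutron_id='gw-1',
#         connector_type='stt', connector_ip='10.0.0.5',
#         client_certificate=None, tz_uuid='tz-uuid-1')
#     assert body['transport_connectors'][0]['type'] == 'STTConnector'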
def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
    """Retrieve LSN and LSN port for the network and the subnet."""
    lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
    if lsn_id:
        try:
            lsn_port_id = lsn_api.lsn_port_by_subnet_get(
                self.cluster, lsn_id, subnet_id)
        except (n_exc.NotFound, api_exc.NsxApiException):
            if raise_on_err:
                LOG.error(_LE('Unable to find Logical Service Node Port '
                              'for LSN %(lsn_id)s and subnet '
                              '%(subnet_id)s'),
                          {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                            entity='subnet',
                                            entity_id=subnet_id)
            else:
                LOG.warning(_LW('Unable to find Logical Service Node '
                                'Port for LSN %(lsn_id)s and subnet '
                                '%(subnet_id)s'),
                            {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                return (lsn_id, None)
        else:
            return (lsn_id, lsn_port_id)
    else:
        return (None, None)
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id,
                tenant_id, display_name, device_id, admin_status_enabled,
                mac_address=None, fixed_ips=None,
                port_security_enabled=None, security_profiles=None,
                queue_id=None, mac_learning_enabled=None,
                allowed_address_pairs=None):
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=utils.check_and_truncate(display_name),
        tags=utils.get_tags(os_tid=tenant_id,
                            q_port_id=neutron_port_id,
                            vm_id=utils.device_id_to_vm_id(device_id)))

    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)

    path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
    try:
        result = nsxlib.do_request(HTTP_PUT, path,
                                   jsonutils.dumps(lport_obj),
                                   cluster=cluster)
        LOG.debug("Updated logical port %(result)s "
                  "on logical switch %(uuid)s",
                  {'result': result['uuid'], 'uuid': lswitch_uuid})
        return result
    except exception.NotFound as e:
        LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFoundOnNetwork(
            port_id=lport_uuid, net_id=lswitch_uuid)
def _update_nat_rule(self, task):
    # TODO(fank): use POST for optimization
    #             return rule_id for future reference
    edge_id = task.userdata['edge_id']
    if task != self.updated_task['nat'][edge_id]:
        # this task does not have the latest config, abort now
        # for speedup
        return task_constants.TaskStatus.ABORT

    rules = task.userdata['rules']
    LOG.debug("VCNS: start updating nat rules: %s", rules)

    nat = {
        'featureType': 'nat',
        'rules': {
            'natRulesDtos': rules
        }
    }

    try:
        self.vcns.update_nat_config(edge_id, nat)
        status = task_constants.TaskStatus.COMPLETED
    except exceptions.VcnsApiException as e:
        LOG.exception(_LE("VCNS: Failed to update nat rules:\n%s"),
                      e.response)
        status = task_constants.TaskStatus.ERROR

    return status
def _create_nat_rule(self, task):
    # TODO(fank): use POST for optimization
    #             return rule_id for future reference
    rule = task.userdata['rule']
    LOG.debug("VCNS: start creating nat rules: %s", rule)
    edge_id = task.userdata['edge_id']
    nat = self.get_nat_config(edge_id)
    location = task.userdata['location']

    del nat['version']

    if location is None or location == constants.APPEND:
        nat['rules']['natRulesDtos'].append(rule)
    else:
        nat['rules']['natRulesDtos'].insert(location, rule)

    try:
        self.vcns.update_nat_config(edge_id, nat)
        status = task_constants.TaskStatus.COMPLETED
    except exceptions.VcnsApiException as e:
        LOG.exception(_LE("VCNS: Failed to create nat rule:\n%s"),
                      e.response)
        status = task_constants.TaskStatus.ERROR

    return status
def create_bridge(self, device_name, bridge):
    try:
        self.vcns.create_bridge(device_name, bridge)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to create bridge on device %s"),
                          device_name)
def _subnet_create(self, context, subnet, clean_on_err=True):
    if subnet['enable_dhcp']:
        network_id = subnet['network_id']
        # Create port for DHCP service
        dhcp_port = {
            "name": "",
            "admin_state_up": True,
            "device_id": "",
            "device_owner": const.DEVICE_OWNER_DHCP,
            "network_id": network_id,
            "tenant_id": subnet["tenant_id"],
            "mac_address": const.ATTR_NOT_SPECIFIED,
            "fixed_ips": [{"subnet_id": subnet['id']}]
        }
        try:
            # This will end up calling handle_port_dhcp_access
            # down below as well as handle_port_metadata_access
            self.plugin.create_port(context, {'port': dhcp_port})
        except p_exc.PortConfigurationError as e:
            LOG.error(_LE("Error while creating subnet %(cidr)s for "
                          "network %(network)s. Please contact the "
                          "administrator"),
                      {"cidr": subnet["cidr"],
                       "network": network_id})
            db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                self.plugin, context, e.port_id)
            if clean_on_err:
                self.plugin.delete_subnet(context, subnet['id'])
            raise n_exc.Conflict()
def delete(self, context, pool):
    listener = pool.listener
    lb_id = listener.loadbalancer_id
    lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
        context.session, lb_id)
    pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
        context.session, lb_id, listener.id, pool.id)
    listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding(
        context.session, lb_id, listener.id)

    edge_id = lb_binding['edge_id']
    edge_pool_id = pool_binding['edge_pool_id']

    try:
        vse = listener_mgr.listener_to_edge_vse(
            listener, lb_binding['vip_address'], None,
            listener_binding['app_profile_id'])
        with locking.LockManager.get_lock(edge_id):
            self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse)
            self.vcns.delete_pool(edge_id, edge_pool_id)
        self.lbv2_driver.pool.successful_completion(
            context, pool, delete=True)
        nsxv_db.del_nsxv_lbaas_pool_binding(
            context.session, lb_id, listener.id, pool.id)
    except nsxv_exc.VcnsApiException:
        self.lbv2_driver.pool.failed_completion(context, pool)
        # pool is an object, not a dict, so use attribute access
        LOG.error(_LE('Failed to delete pool %s'), pool.id)
def get_nat_config(self, edge_id):
    try:
        return self.vcns.get_nat_config(edge_id)[1]
    except exceptions.VcnsApiException as e:
        LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
                      e.response)
        raise e
def _get_firewall(self, context, edge_id):
    try:
        return self.vcns.get_firewall(edge_id)[1]
    except vcns_exc.VcnsApiException as e:
        LOG.exception(_LE("Failed to get firewall with edge "
                          "id: %s"), edge_id)
        raise e
def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True):
    """Retrieve LSN and LSN port given network and mac address."""
    lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
    if lsn_id:
        try:
            lsn_port_id = lsn_api.lsn_port_by_mac_get(
                self.cluster, lsn_id, mac)
        except (n_exc.NotFound, api_exc.NsxApiException):
            if raise_on_err:
                LOG.error(_LE('Unable to find Logical Service Node Port '
                              'for LSN %(lsn_id)s and mac address '
                              '%(mac)s'),
                          {'lsn_id': lsn_id, 'mac': mac})
                raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                            entity='MAC',
                                            entity_id=mac)
            else:
                LOG.warning(_LW('Unable to find Logical Service Node '
                                'Port for LSN %(lsn_id)s and mac address '
                                '%(mac)s'),
                            {'lsn_id': lsn_id, 'mac': mac})
                return (lsn_id, None)
        else:
            return (lsn_id, lsn_port_id)
    else:
        return (None, None)
def _loopingcall_callback():
    self._monitor_busy = True
    try:
        self._check_pending_tasks()
    except Exception:
        LOG.exception(_LE("Exception in _check_pending_tasks"))
    self._monitor_busy = False
def create(self, context, lb):
    edge_id = lb_common.get_lbaas_edge_id_for_subnet(
        context, self.core_plugin, lb.vip_subnet_id, lb.tenant_id)

    if not edge_id:
        msg = _(
            'No suitable Edge found for subnet %s') % lb.vip_subnet_id
        raise n_exc.BadRequest(resource='edge-lbaas', msg=msg)

    try:
        if not nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge(
                context.session, edge_id):
            lb_common.enable_edge_acceleration(self.vcns, edge_id)

        lb_common.add_vip_as_secondary_ip(self.vcns, edge_id,
                                          lb.vip_address)
        edge_fw_rule_id = lb_common.add_vip_fw_rule(
            self.vcns, edge_id, lb.id, lb.vip_address)

        nsxv_db.add_nsxv_lbaas_loadbalancer_binding(
            context.session, lb.id, edge_id, edge_fw_rule_id,
            lb.vip_address)
        self.lbv2_driver.load_balancer.successful_completion(context, lb)
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv2_driver.load_balancer.failed_completion(context, lb)
            LOG.error(_LE('Failed to create loadbalancer %s'), lb.id)
def delete(self, context, member):
    listener = member.pool.listener
    lb_id = listener.loadbalancer_id
    lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
        context.session, lb_id)
    pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding(
        context.session, lb_id, listener.id, member.pool_id)

    edge_id = lb_binding['edge_id']
    edge_pool_id = pool_binding['edge_pool_id']

    with locking.LockManager.get_lock(edge_id):
        edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1]

        for i, m in enumerate(edge_pool['member']):
            if m['name'] == lb_common.get_member_id(member.id):
                edge_pool['member'].pop(i)
                break

        try:
            self.vcns.update_pool(edge_id, edge_pool_id, edge_pool)
            self.lbv2_driver.member.successful_completion(
                context, member, delete=True)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv2_driver.member.failed_completion(context, member)
                LOG.error(_LE('Failed to delete member on edge: %s'),
                          edge_id)
def update_firewall(self, context, edge_id, firewall):
    fw_req = self._convert_firewall(context, firewall)
    try:
        self.vcns.update_firewall(edge_id, fw_req)
    except vcns_exc.VcnsApiException as e:
        LOG.exception(_LE("Failed to update firewall "
                          "with edge_id: %s"), edge_id)
        raise e
    fw_res = self._get_firewall(context, edge_id)
    nsxv_db.cleanup_nsxv_edge_firewallrule_binding(context.session,
                                                   edge_id)
    self._create_rule_id_mapping(context, edge_id, firewall, fw_res)
def get_edge_status(self, edge_id):
    try:
        response = self.vcns.get_edge_status(edge_id)[1]
        status_level = self._edge_status_to_level(
            response['edgeStatus'])
    except exceptions.VcnsApiException as e:
        LOG.error(_LE("VCNS: Failed to get edge %(edge_id)s status: "
                      "Reason: %(reason)s"),
                  {'edge_id': edge_id, 'reason': e.response})
        status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
        try:
            desc = jsonutils.loads(e.response)
            if desc.get('errorCode') == (
                    constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
                status_level = (
                    constants.RouterStatus.ROUTER_STATUS_DOWN)
        except ValueError:
            LOG.error(_LE('Error code not present. %s'), e.response)

    return status_level
def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id):
    with session.begin(subtransactions=True):
        try:
            internal_edge = nsxv_models.NsxvInternalEdges(
                ext_ip_address=ext_ip_address,
                purpose=purpose,
                router_id=router_id)
            session.add(internal_edge)
        except db_exc.DBDuplicateEntry:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Duplicate internal Edge IP %s"),
                              ext_ip_address)
def delete_vdr_internal_interface(self, edge_id, interface_index):
    LOG.debug("Delete VDR interface on edge: %s", edge_id)
    try:
        header, response = self.vcns.delete_vdr_internal_interface(
            edge_id, interface_index)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to delete vdr interface on edge: "
                              "%s"), edge_id)
    # avoid bug 1389358
    self.check_edge_jobs(edge_id)
def nsx_update_switch(resource, event, trigger, **kwargs):
    nsxv = utils.get_nsxv_client()
    if not kwargs.get('property'):
        LOG.error(_LE("Need to specify dvs-id parameter and "
                      "attribute to update. Add --property dvs-id=<dvs-id> "
                      "--property teamingpolicy=<policy>"))
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    dvs_id = properties.get('dvs-id')
    if not dvs_id:
        LOG.error(_LE("Need to specify dvs-id. "
                      "Add --property dvs-id=<dvs-id>"))
        return
    try:
        h, switch = nsxv.get_vdn_switch(dvs_id)
    except exceptions.ResourceNotFound:
        LOG.error(_LE("DVS %s not found"), dvs_id)
        return
    supported_policies = ['ETHER_CHANNEL', 'LOADBALANCE_LOADBASED',
                          'LOADBALANCE_SRCID', 'LOADBALANCE_SRCMAC',
                          'FAILOVER_ORDER', 'LACP_ACTIVE', 'LACP_PASSIVE',
                          'LACP_V2']
    policy = properties.get('teamingpolicy')
    if policy in supported_policies:
        if switch['teamingPolicy'] == policy:
            LOG.info(_LI("Policy already set!"))
            return
        LOG.info(_LI("Updating NSXv switch %(dvs)s teaming policy to "
                     "%(policy)s"), {'dvs': dvs_id, 'policy': policy})
        switch['teamingPolicy'] = policy
        try:
            switch = nsxv.update_vdn_switch(switch)
        except exceptions.VcnsApiException as e:
            desc = jsonutils.loads(e.response)
            details = desc.get('details')
            if details.startswith("No enum constant"):
                LOG.error(_LE("Unknown teaming policy %s"), policy)
            else:
                LOG.error(_LE("Unexpected error occurred: %s"), details)
            return
        LOG.info(_LI("Switch value after update: %s"), switch)
    else:
        LOG.info(_LI("Current switch value is: %s"), switch)
        LOG.error(_LE("Invalid teaming policy. "
                      "Add --property teamingpolicy=<policy>"))
        LOG.error(_LE("Possible values: %s"),
                  ', '.join(supported_policies))
def detach_vm_interface(self, vm_moref, device):
    new_spec = self._build_vm_spec_detach(device)
    task = self._session.invoke_api(self._session.vim,
                                    'ReconfigVM_Task',
                                    vm_moref,
                                    spec=new_spec)
    try:
        self._session.wait_for_task(task)
        LOG.info(_LI("Updated VM %(moref)s spec - detached an interface"),
                 {'moref': vm_moref.value})
    except Exception as e:
        LOG.error(_LE("Failed to reconfigure vm moref %(moref)s: %(e)s"),
                  {'moref': vm_moref.value, 'e': e})
def _deallocate(self, address):
    try:
        self._vcns.release_ipam_ip_to_pool(self._nsx_pool_id, address)
    except vc_exc.VcnsApiException as e:
        LOG.error(_LE("NSX IPAM failed to free ip %(ip)s of subnet "
                      "%(id)s: %(e)s"),
                  {'e': e.response,
                   'ip': address,
                   'id': self._subnet_id})
        raise ipam_exc.IpAddressAllocationNotFound(
            subnet_id=self._subnet_id,
            ip_address=address)
def _result(self, task):
    """Notify task execution result."""
    try:
        task._result_callback(task)
    except Exception:
        LOG.exception(_LE("Task %(task)s encountered exception in "
                          "%(cb)s"),
                      {'task': str(task),
                       'cb': str(task._result_callback)})

    LOG.debug("Task %(task)s returned %(status)s",
              {'task': str(task), 'status': task.status})

    task._finished()
def _update_edge(self, task):
    edge_id = task.userdata['edge_id']
    LOG.debug("start update edge %s", edge_id)
    request = task.userdata['request']
    try:
        self.vcns.update_edge(edge_id, request)
        status = task_constants.TaskStatus.COMPLETED
    except exceptions.VcnsApiException as e:
        LOG.error(_LE("Failed to update edge: %s"), e.response)
        status = task_constants.TaskStatus.ERROR

    return status
def delete(self, context, lb):
    binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(
        context.session, lb.id)
    if binding:
        try:
            lb_common.del_vip_fw_rule(self.vcns,
                                      binding['edge_id'],
                                      binding['edge_fw_rule_id'])
        except nsxv_exc.VcnsApiException as e:
            LOG.error(_LE('Failed to delete loadbalancer %(lb)s FW rule. '
                          'exception is %(exc)s'),
                      {'lb': lb.id, 'exc': e})
        try:
            lb_common.del_vip_as_secondary_ip(self.vcns,
                                              binding['edge_id'],
                                              lb.vip_address)
        except Exception as e:
            LOG.error(_LE('Failed to delete loadbalancer %(lb)s interface'
                          ' IP. exception is %(exc)s'),
                      {'lb': lb.id, 'exc': e})

        nsxv_db.del_nsxv_lbaas_loadbalancer_binding(context.session, lb.id)
    self.lbv2_driver.load_balancer.successful_completion(context, lb,
                                                         delete=True)
def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs):
    """Resync DHCP bindings on NSXv Edge"""
    if not kwargs.get('property'):
        LOG.error(_LE("Need to specify edge-id parameter"))
        return
    else:
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        edge_id = properties.get('edge-id')
        if not edge_id:
            LOG.error(_LE("Need to specify edge-id parameter"))
            return
        LOG.info(_LI("Updating NSXv Edge: %s"), edge_id)
        # Need to create a plugin object; so that we are able to
        # do neutron list-ports.
        plugin = utils.NsxVPluginWrapper()
        nsxv_manager = vcns_driver.VcnsDriver(
            edge_utils.NsxVCallbacks(plugin))
        edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin)
        try:
            edge_manager.update_dhcp_service_config(
                neutron_db.context, edge_id)
        except exceptions.ResourceNotFound:
            LOG.error(_LE("Edge %s not found"), edge_id)
def delete_pool_health_monitor(self, context, health_monitor, pool_id,
                               pool_mapping, mon_mapping):
    LOG.debug('Deleting HM %s', health_monitor)

    edge_id = pool_mapping['edge_id']
    if not mon_mapping:
        return

    with locking.LockManager.get_lock(pool_mapping['edge_id']):
        edge_pool = self.vcns.get_pool(edge_id,
                                       pool_mapping['edge_pool_id'])[1]
        edge_pool['monitorId'].remove(mon_mapping['edge_monitor_id'])

        try:
            self.vcns.update_pool(edge_id,
                                  pool_mapping['edge_pool_id'],
                                  edge_pool)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv1_driver.pool_health_monitor_failed(
                    context, health_monitor, pool_id)
                LOG.error(
                    _LE('Failed to delete monitor mapping on edge: %s'),
                    mon_mapping['edge_id'])

        # If this monitor is not used on this edge anymore, delete it
        if not edge_pool['monitorId']:
            try:
                self.vcns.delete_health_monitor(
                    mon_mapping['edge_id'],
                    mon_mapping['edge_monitor_id'])
            except nsxv_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    self.lbv1_driver.pool_health_monitor_failed(
                        context, health_monitor, pool_id)
                    LOG.error(
                        _LE('Failed to delete monitor on edge: %s'),
                        mon_mapping['edge_id'])

    self.lbv1_driver.delete_pool_health_monitor_successful(
        context, health_monitor, pool_id, mon_mapping)
def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule):
    rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
        context.session, ref_rule_id, edge_id)
    ref_vcns_rule_id = rule_map.rule_vseid
    fwr_vse_next = self._get_firewall_rule_next(
        context, edge_id, ref_vcns_rule_id)
    fwr_req = self._convert_firewall_rule(context, firewall_rule)
    if fwr_vse_next:
        ref_vcns_rule_id = fwr_vse_next['ruleId']
        try:
            header = self.vcns.add_firewall_rule_above(
                edge_id, int(ref_vcns_rule_id), fwr_req)[0]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to add firewall rule above: "
                                  "%(rule_id)s with edge_id: "
                                  "%(edge_id)s"),
                              {'rule_id': ref_vcns_rule_id,
                               'edge_id': edge_id})
    else:
        # append the rule at the bottom
        try:
            header = self.vcns.add_firewall_rule(edge_id, fwr_req)[0]
        except vcns_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to append a firewall rule "
                                  "with edge_id: %s"), edge_id)

    objuri = header['location']
    fwr_vseid = objuri[objuri.rfind("/") + 1:]
    map_info = {
        'rule_id': firewall_rule['id'],
        'rule_vseid': fwr_vseid,
        'edge_id': edge_id
    }
    nsxv_db.add_nsxv_edge_firewallrule_binding(context.session, map_info)
def _get_internal_network_and_subnet(self):
    internal_net = None
    internal_subnet = None

    # Try to find internal net, internal subnet. If not found, create new
    net_list = nsxv_db.get_nsxv_internal_network(
        self.context.session,
        vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE)
    if net_list:
        internal_net = net_list[0]['network_id']

    if internal_net:
        internal_subnet = self.nsxv_plugin.get_subnets(
            self.context,
            fields=['id'],
            filters={'network_id': [internal_net]})[0]['id']

    if internal_net is None or internal_subnet is None:
        if cfg.CONF.nsxv.metadata_initializer:
            # Couldn't find net, subnet - create new
            try:
                internal_net, internal_subnet = (
                    self._create_metadata_internal_network(
                        INTERNAL_SUBNET))
            except Exception as e:
                nsxv_db.delete_nsxv_internal_network(
                    self.context.session,
                    vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE)
                # if network is created, clean up
                if internal_net:
                    self.nsxv_plugin.delete_network(self.context,
                                                    internal_net)
                LOG.exception(_LE("Exception %s while creating internal "
                                  "network for metadata service"), e)
                return

            # Update the new network_id in DB
            nsxv_db.create_nsxv_internal_network(
                self.context.session,
                nsxv_constants.INTER_EDGE_PURPOSE,
                internal_net)
        else:
            error = _('Metadata initialization is incomplete on '
                      'initializer node')
            raise nsxv_exc.NsxPluginException(err_msg=error)

    return internal_net, internal_subnet
def get_port_status(cluster, lswitch_id, port_id):
    """Retrieve the operational status of the port."""
    try:
        r = nsxlib.do_request(HTTP_GET,
                              "/ws.v1/lswitch/%s/lport/%s/status" %
                              (lswitch_id, port_id), cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_LE("Port not found, Error: %s"), str(e))
        raise exception.PortNotFoundOnNetwork(
            port_id=port_id, net_id=lswitch_id)
    if r['link_status_up'] is True:
        return constants.PORT_STATUS_ACTIVE
    else:
        return constants.PORT_STATUS_DOWN
def _asyn_update_firewall(self, task):
    edge_id = task.userdata['edge_id']
    config = task.userdata['config']
    context = task.userdata['jobdata']['context']
    try:
        self.vcns.update_firewall(edge_id, config)
    except vcns_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to update firewall "
                              "with edge_id: %s"), edge_id)
    vcns_fw_config = self._get_firewall(context, edge_id)
    task.userdata['vcns_fw_config'] = vcns_fw_config
    return task_const.TaskStatus.COMPLETED
def _dvs_delete_network(self, context, id):
    network = self._get_network(context, id)
    dvs_id = self._dvs_get_id(network)
    bindings = nsx_db.get_network_bindings(context.session, id)
    with context.session.begin(subtransactions=True):
        nsx_db.delete_network_bindings(context.session, id)
        super(NsxDvsV2, self).delete_network(context, id)
    try:
        if (not bindings or
                bindings[0].binding_type !=
                c_utils.NetworkTypes.PORTGROUP):
            self._dvs.delete_port_group(dvs_id)
    except Exception:
        LOG.exception(_LE('Unable to delete DVS port group %s'), id)
    self.handle_network_dhcp_access(context, id, action='delete_network')
def _invoke_monitor(self, state):
    for func in self._monitors[state]:
        try:
            func(self)
        except Exception:
            LOG.exception(_LE("Task %(task)s encountered exception in "
                              "%(func)s at state %(state)s"),
                          {'task': str(self),
                           'func': str(func),
                           'state': state})

    self._move_state(state)

    return self
def delete_backend_network(resource, event, trigger, **kwargs):
    """Delete a backend network by its moref
    """
    errmsg = ("Need to specify moref property. Add --property "
              "moref=<moref>")
    if not kwargs.get('property'):
        LOG.error(_LE("%s"), errmsg)
        return

    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    moref = properties.get('moref')
    if not moref:
        LOG.error(_LE("%s"), errmsg)
        return

    backend_name = get_networks_name_map().get(moref)
    if not backend_name:
        LOG.error(_LE("Failed to find the backend network %(moref)s"),
                  {'moref': moref})
        return

    # Note: in case the backend network is attached to other backend
    # objects, like a VM, the deletion may fail and throw an exception
    nsxv = utils.get_nsxv_client()
    if moref.startswith(PORTGROUP_PREFIX):
        # get the dvs id from the backend name:
        dvs_id = get_dvs_id_from_backend_name(backend_name)
        if not dvs_id:
            LOG.error(_LE("Failed to find the DVS id of backend network "
                          "%(moref)s"), {'moref': moref})
        else:
            try:
                nsxv.delete_port_group(dvs_id, moref)
            except Exception as e:
                LOG.error(_LE("Failed to delete backend network "
                              "%(moref)s: %(e)s"),
                          {'moref': moref, 'e': e})
            else:
                LOG.info(_LI("Backend network %(moref)s was deleted"),
                         {'moref': moref})
    else:
        # Virtual wire
        try:
            nsxv.delete_virtual_wire(moref)
        except Exception as e:
            LOG.error(_LE("Failed to delete backend network "
                          "%(moref)s: %(e)s"),
                      {'moref': moref, 'e': e})
        else:
            LOG.info(_LI("Backend network %(moref)s was deleted"),
                     {'moref': moref})
def delete_vip(self, context, vip, vip_mapping):
    LOG.debug('Deleting VIP %s', vip)

    if not vip_mapping:
        LOG.error(_LE('No mapping found for vip %s'), vip['id'])
    else:
        edge_id = vip_mapping['edge_id']
        edge_vse_id = vip_mapping['edge_vse_id']
        app_profile_id = vip_mapping['edge_app_profile_id']

        try:
            with locking.LockManager.get_lock(edge_id):
                self.vcns.delete_vip(edge_id, edge_vse_id)
            lb_common.del_vip_as_secondary_ip(self.vcns, edge_id,
                                              vip['address'])
            lb_common.del_vip_fw_rule(self.vcns, edge_id,
                                      vip_mapping['edge_fw_rule_id'])
        except nsxv_exc.ResourceNotFound:
            LOG.error(_LE('vip not found on edge: %s'), edge_id)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv1_driver.vip_failed(context, vip)
                LOG.error(_LE('Failed to delete vip on edge: %s'),
                          edge_id)

        try:
            with locking.LockManager.get_lock(edge_id):
                self.vcns.delete_app_profile(edge_id, app_profile_id)
        except nsxv_exc.ResourceNotFound:
            LOG.error(_LE('app profile not found on edge: %s'), edge_id)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self.lbv1_driver.vip_failed(context, vip)
                LOG.error(_LE('Failed to delete app profile on Edge: %s'),
                          edge_id)

    self.lbv1_driver.delete_vip_successful(context, vip)
def handle_port_dhcp_access(plugin, context, port, action):
    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
             {"action": action, "resource": port})
    if port["device_owner"] == const.DEVICE_OWNER_DHCP:
        network_id = port["network_id"]
        if action == "create_port":
            # at this point the port must have a subnet and a fixed ip
            subnet_id = port["fixed_ips"][0]['subnet_id']
            subnet = plugin.get_subnet(context, subnet_id)
            subnet_data = {
                "mac_address": port["mac_address"],
                "ip_address": subnet['cidr'],
                "subnet_id": subnet['id']
            }
            try:
                plugin.lsn_manager.lsn_port_dhcp_setup(
                    context, network_id, port['id'], subnet_data, subnet)
            except p_exc.PortConfigurationError:
                LOG.error(_LE("Error while configuring DHCP for "
                              "port %s"), port['id'])
                raise n_exc.NeutronException()
        elif action == "delete_port":
            plugin.lsn_manager.lsn_port_dispose(context, network_id,
                                                port['mac_address'])
    elif port["device_owner"] != const.DEVICE_OWNER_DHCP:
        if port.get("fixed_ips"):
            # do something only if there are IP's and dhcp is enabled
            subnet_id = port["fixed_ips"][0]['subnet_id']
            if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
                             "to do"), subnet_id)
                return
            host_data = {
                "mac_address": port["mac_address"],
                "ip_address": port["fixed_ips"][0]['ip_address']
            }
            network_id = port["network_id"]
            if action == "create_port":
                handler = plugin.lsn_manager.lsn_port_dhcp_host_add
            elif action == "delete_port":
                handler = plugin.lsn_manager.lsn_port_dhcp_host_remove
            try:
                handler(context, network_id, subnet_id, host_data)
            except p_exc.PortConfigurationError:
                with excutils.save_and_reraise_exception():
                    if action == 'create_port':
                        db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                            plugin, context, port['id'])
    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
def change_edge_syslog(properties):
    request = {
        'featureType': 'syslog',
        'serverAddresses': {'ipAddress': [], 'type': 'IpAddressesDto'}}

    request['protocol'] = properties.get('syslog-proto', 'tcp')
    if request['protocol'] not in ['tcp', 'udp']:
        LOG.error(_LE("Property value error: syslog-proto must be "
                      "tcp/udp"))
        return

    if properties.get('syslog-server'):
        request['serverAddresses']['ipAddress'].append(
            properties.get('syslog-server'))
    if properties.get('syslog-server2'):
        request['serverAddresses']['ipAddress'].append(
            properties.get('syslog-server2'))

    edge_id = properties.get('edge-id')
    try:
        nsxv.update_edge_syslog(edge_id, request)
    except nsxv_exceptions.ResourceNotFound:
        LOG.error(_LE("Edge %s not found"), edge_id)
    except exceptions.NeutronException as e:
        LOG.error(_LE("%s"), str(e))
def _register_types(self):
    for ext in self:
        router_type = ext.obj.get_type()
        if router_type in self.drivers:
            LOG.error(_LE("Type driver '%(new_driver)s' ignored because "
                          "type driver '%(old_driver)s' is already "
                          "registered for type '%(type)s'"),
                      {'new_driver': ext.name,
                       'old_driver': self.drivers[router_type].name,
                       'type': router_type})
        else:
            self.drivers[router_type] = ext
    LOG.info(_LI("Registered types: %s"), self.drivers.keys())
def lsn_get(self, context, network_id, raise_on_err=True):
    """Retrieve the LSN id associated to the network."""
    try:
        return lsn_api.lsn_for_network_get(self.cluster, network_id)
    except (n_exc.NotFound, api_exc.NsxApiException):
        if raise_on_err:
            LOG.error(_LE('Unable to find Logical Service Node for '
                          'network %s.'),
                      network_id)
            raise p_exc.LsnNotFound(entity='network',
                                    entity_id=network_id)
        else:
            LOG.warning(_LW('Unable to find Logical Service Node for '
                            'the requested network %s.'),
                        network_id)
def delete_port_group(self, net_id):
    """Delete a specific port group."""
    moref = self._net_id_to_moref(net_id)
    task = self._session.invoke_api(self._session.vim,
                                    'Destroy_Task',
                                    moref)
    try:
        self._session.wait_for_task(task)
    except Exception:
        # NOTE(garyk): handle more specific exceptions
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed to delete port group for %s.'),
                          net_id)
    LOG.info(_LI("%(net_id)s deleted from %(dvs)s."),
             {'net_id': net_id,
              'dvs': self._dvs_moref.value})
def delete_firewall_rule(self, context, id, edge_id):
    rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
        context.session, id, edge_id)
    vcns_rule_id = rule_map.rule_vseid
    try:
        self.vcns.delete_firewall_rule(edge_id, vcns_rule_id)
    except vcns_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to delete firewall rule: "
                              "%(rule_id)s "
                              "with edge_id: %(edge_id)s"),
                          {'rule_id': id,
                           'edge_id': edge_id})
    nsxv_db.delete_nsxv_edge_firewallrule_binding(context.session, id)
def update_firewall_rule(self, context, id, edge_id, firewall_rule):
    rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
        context.session, id, edge_id)
    vcns_rule_id = rule_map.rule_vseid
    fwr_req = self._convert_firewall_rule(context, firewall_rule)
    try:
        self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req)
    except vcns_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Failed to update firewall rule: "
                              "%(rule_id)s "
                              "with edge_id: %(edge_id)s"),
                          {'rule_id': id,
                           'edge_id': edge_id})
def attach_vm_interface(self, vm_moref, neutron_port_id,
                        port_mac, nsx_net_id, device_type):
    new_spec = self._build_vm_spec_attach(
        neutron_port_id, port_mac, nsx_net_id, device_type)
    task = self._session.invoke_api(self._session.vim,
                                    'ReconfigVM_Task',
                                    vm_moref,
                                    spec=new_spec)
    try:
        self._session.wait_for_task(task)
        LOG.info(_LI("Updated VM moref %(moref)s spec - "
                     "attached an interface"),
                 {'moref': vm_moref.value})
    except Exception as e:
        LOG.error(_LE("Failed to reconfigure VM %(moref)s spec: %(e)s"),
                  {'moref': vm_moref.value, 'e': e})
def update_pool(self, context, old_pool, pool, pool_mapping):
    LOG.debug('Updating pool %s to %s', old_pool, pool)
    edge_pool = convert_lbaas_pool(pool)
    try:
        with locking.LockManager.get_lock(pool_mapping['edge_id']):
            curr_pool = self.vcns.get_pool(
                pool_mapping['edge_id'],
                pool_mapping['edge_pool_id'])[1]
            curr_pool.update(edge_pool)
            self.vcns.update_pool(pool_mapping['edge_id'],
                                  pool_mapping['edge_pool_id'],
                                  curr_pool)
        self.lbv1_driver.pool_successful(context, pool)
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            self.lbv1_driver.pool_failed(context, pool)
            LOG.error(_LE('Failed to update pool %s'), pool['id'])
def stats(self, context, pool_id, pool_mapping):
    LOG.debug('Retrieving stats for pool %s', pool_id)

    try:
        lb_stats = self.vcns.get_loadbalancer_statistics(
            pool_mapping['edge_id'])
    except nsxv_exc.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Failed to read load balancer statistics, '
                          'edge: %s'), pool_mapping['edge_id'])

    pools_stats = lb_stats[1].get('pool', [])
    plugin = self._get_lb_plugin()
    members = plugin.get_members(context,
                                 filters={'pool_id': [pool_id]},
                                 fields=['id', 'status'])
    member_map = {m['id']: m['status'] for m in members}

    for pool_stats in pools_stats:
        if pool_stats['poolId'] == pool_mapping['edge_pool_id']:
            stats = {'bytes_in': pool_stats.get('bytesIn', 0),
                     'bytes_out': pool_stats.get('bytesOut', 0),
                     'active_connections':
                         pool_stats.get('curSessions', 0),
                     'total_connections':
                         pool_stats.get('totalSessions', 0)}

            member_stats = {}
            for member in pool_stats.get('member', []):
                member_id = member['name'][len(lb_common.MEMBER_ID_PFX):]
                if member_map[member_id] != 'ERROR':
                    member_stats[member_id] = {
                        'status': ('INACTIVE'
                                   if member['status'] == 'DOWN'
                                   else 'ACTIVE')}
            stats['members'] = member_stats
            return stats

    return {'bytes_in': 0,
            'bytes_out': 0,
            'active_connections': 0,
            'total_connections': 0}
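# Shape of the dict returned by stats() above, with illustrative values only
# (not real API output):
#
#     {'bytes_in': 1024,
#      'bytes_out': 2048,
#      'active_connections': 3,
#      'total_connections': 42,
#      'members': {'<member-id>': {'status': 'ACTIVE'}}}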