def json2AS3Obj(jdata, network_id, segmentation_id):
    action = 'deploy'
    # if CONF.f5_agent.dry_run:
    #     action = 'dry-run'
    decl = AS3(persist=True, action=action, _log_level=LOG.logger.level)
    adc = ADC(id="urn:uuid:{}".format(uuid.uuid4()),
              label="F5 BigIP Octavia Provider")
    decl.set_adc(adc)

    # if not CONF.f5_agent.migration and CONF.f5_agent.sync_to_group:
    #     # No group syncing if we are in migration mode
    #     decl.set_sync_to_group(CONF.f5_agent.sync_to_group)

    def dictDecoder(mydict):
        return namedtuple('LBObj', mydict.keys())(*mydict.values())

    lb = json.loads(jdata, object_hook=dictDecoder)
    loadbalancer = lb.loadbalancer
    project_id = loadbalancer.project_id
    tenant = adc.get_or_create_tenant(
        m_part.get_name(network_id),
        defaultRouteDomain=segmentation_id,
        label='{}{}'.format(constants.PREFIX_PROJECT, project_id or 'none'))

    # Skip load balancer in (pending) deletion
    if loadbalancer.provisioning_status in [constants.PENDING_DELETE,
                                            constants.DELETED]:
        return

    # Create generic application
    app = Application(constants.APPLICATION_GENERIC, label=loadbalancer.id)

    # Attach Octavia listeners as AS3 service objects
    for listener in loadbalancer.listeners:
        if not driver_utils.pending_delete(listener):
            try:
                # service_entities = m_service.get_service(listener, self.cert_manager, self._esd_repo)
                service_entities = m_service.get_service(loadbalancer, listener)
                app.add_entities(service_entities)
            except o_exceptions.CertificateRetrievalException as e:
                LOG.error("Could not retrieve certificate, skipping listener "
                          "'%s': %s", listener.id, e)

    # Attach pools
    for pool in loadbalancer.pools:
        if not driver_utils.pending_delete(pool):
            app.add_entities(m_pool.get_pool(pool))

    # Attach newly created application
    tenant.add_application(m_app.get_name(loadbalancer.id), app)
    return decl.to_json()
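# --- Illustrative usage (not part of the driver) ---------------------------
# A minimal sketch of calling json2AS3Obj. The JSON schema is an assumption
# derived from the attribute accesses above: a top-level "loadbalancer" key
# whose fields mirror the Octavia data model. All ids are placeholders.
def _example_json2AS3Obj():
    jdata = json.dumps({
        "loadbalancer": {
            "id": "lb-a1b2c3",
            "project_id": "proj-d4e5f6",
            "provisioning_status": "ACTIVE",
            "listeners": [],
            "pools": [],
        }
    })
    # Returns the serialized AS3 declaration for the given network/segment.
    return json2AS3Obj(jdata, network_id="net-1", segmentation_id=1234)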
def get_tenant(segmentation_id, loadbalancers, status_manager, cert_manager,
               esd_repo):
    project_id = None
    if loadbalancers:
        project_id = loadbalancers[-1].project_id

    tenant_dict = {}
    if segmentation_id:
        tenant_dict['label'] = '{}{}'.format(constants.PREFIX_PROJECT,
                                             project_id or 'none')
        tenant_dict['defaultRouteDomain'] = segmentation_id
    tenant = as3.Tenant(**tenant_dict)

    # Skip members that re-use load balancer vips
    loadbalancer_ips = [load_balancer.vip.ip_address
                        for load_balancer in loadbalancers
                        if not driver_utils.pending_delete(load_balancer)]

    for loadbalancer in loadbalancers:
        # Skip load balancer in (pending) deletion
        if loadbalancer.provisioning_status in [constants.PENDING_DELETE]:
            continue

        # Create generic application
        app = Application(constants.APPLICATION_GENERIC,
                          label=loadbalancer.id)

        # Attach Octavia listeners as AS3 service objects
        for listener in loadbalancer.listeners:
            if not driver_utils.pending_delete(listener):
                try:
                    service_entities = m_service.get_service(
                        listener, cert_manager, esd_repo)
                    app.add_entities(service_entities)
                except o_exceptions.CertificateRetrievalException as e:
                    if getattr(e, 'status_code', 0) != 400:
                        # Error connecting to keystore, skip tenant update
                        raise e
                    LOG.error("Could not retrieve certificate, assuming it "
                              "is deleted, skipping listener '%s': %s",
                              listener.id, e)
                    if status_manager:
                        # Key / Container not found in keystore
                        status_manager.set_error(listener)

        # Attach pools
        for pool in loadbalancer.pools:
            if not driver_utils.pending_delete(pool):
                app.add_entities(
                    m_pool.get_pool(pool, loadbalancer_ips, status_manager))

        # Attach newly created application
        tenant.add_application(m_app.get_name(loadbalancer.id), app)

    return tenant
def get_pool(pool, loadbalancer_ips, status):
    """Map an Octavia pool to an AS3 Pool object.

    :param pool: octavia pool object
    :param loadbalancer_ips: VIP addresses already in use by load balancers
    :param status: status manager instance
    :return: AS3 pool entities
    """
    # entities is a list of tuples, each describing an AS3 object; the
    # objects may reference each other but do not form a hierarchy.
    entities = []
    lbaas_lb_method = pool.lb_algorithm.upper()
    lbmode = _set_lb_method(lbaas_lb_method, pool.members)
    service_args = {
        'label': as3types.f5label(pool.name or pool.description),
        'remark': as3types.f5remark(pool.description or pool.name),
        'loadBalancingMode': lbmode,
        'members': [],
    }

    enable_priority_group = any([member.backup for member in pool.members])
    for member in pool.members:
        if not utils.pending_delete(member):
            if member.ip_address in loadbalancer_ips:
                LOG.warning("The member address %s of member %s (pool %s, "
                            "LB %s) is already in use by another load "
                            "balancer.", member.ip_address, member.id,
                            member.pool.id, member.pool.load_balancer.id)
                if status:
                    status.set_error(member)
                continue

            if member.ip_address == '0.0.0.0':
                LOG.warning("The member address 0.0.0.0 of member %s is "
                            "prohibited.", member.id)
                if status:
                    status.set_error(member)
                continue

            service_args['members'].append(
                m_member.get_member(member, enable_priority_group,
                                    pool.health_monitor))

            # Add custom member monitors
            if pool.health_monitor and (member.monitor_address or
                                        member.monitor_port):
                member_hm = m_monitor.get_monitor(
                    pool.health_monitor,
                    member.monitor_address or member.ip_address,
                    member.monitor_port or member.protocol_port)
                entities.append((m_monitor.get_name(member.id), member_hm))

    if pool.health_monitor and not utils.pending_delete(pool.health_monitor):
        monitor_name = m_monitor.get_name(pool.health_monitor.id)
        entities.append((monitor_name,
                         m_monitor.get_monitor(pool.health_monitor)))
        service_args['monitors'] = [Pointer(use=monitor_name)]

    entities.append((get_name(pool.id), Pool(**service_args)))
    return entities
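# A hypothetical sketch of what m_member.get_member may produce; the real
# implementation lives in the member module. AS3 pool members are plain
# dicts carrying servicePort/serverAddresses; the priority-group handling
# for backup members shown here is an assumption for illustration only.
def _sketch_get_member(member, enable_priority_group, health_monitor):
    # health_monitor is presumably used by the real implementation for
    # per-member monitor references (assumption).
    as3_member = {
        'servicePort': member.protocol_port,
        'serverAddresses': [member.ip_address],
    }
    if enable_priority_group:
        # Backup members join a lower priority group, so the F5 only sends
        # them traffic once the primary members are down (assumed semantics).
        as3_member['priorityGroup'] = 0 if member.backup else 1
    return as3_member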
def tenant_update(bigip, cert_manager, tenant, loadbalancers, segmentation_id):
    """Task to update an F5 with the configuration of all specified
    loadbalancers of a tenant (project).

    :param bigip: bigip instance
    :param cert_manager: CertManagerWrapper instance
    :param tenant: tenant_id/project_id
    :param loadbalancers: loadbalancers to update
    :param segmentation_id: segmentation_id of the loadbalancers
    :return: requests post result
    """
    action = 'deploy'
    if CONF.f5_agent.dry_run:
        action = 'dry-run'
    decl = AS3(persist=True, action=action,
               syncToGroup=CONF.f5_agent.sync_to_group,
               _log_level=LOG.logger.level)
    adc = ADC(id="urn:uuid:{}".format(uuid.uuid4()),
              label="F5 BigIP Octavia Provider")
    decl.set_adc(adc)
    tenant = adc.get_or_create_tenant(m_part.get_name(tenant),
                                      defaultRouteDomain=segmentation_id)

    for loadbalancer in loadbalancers:
        if utils.pending_delete(loadbalancer):
            continue

        # Create generic application
        app = Application(constants.APPLICATION_GENERIC,
                          label=loadbalancer.id)

        # Attach Octavia listeners as AS3 service objects
        for listener in loadbalancer.listeners:
            if not utils.pending_delete(listener):
                service_entities = m_service.get_service(listener,
                                                         cert_manager,
                                                         bigip.esd)
                app.add_entities(service_entities)

        # Attach pools
        for pool in loadbalancer.pools:
            if not utils.pending_delete(pool):
                app.add_entities(m_pool.get_pool(pool))

        # Attach newly created application
        tenant.add_application(m_app.get_name(loadbalancer.id), app)

    return bigip.post(json=decl.to_json())
def workaround_autotool_1469(network_id, loadbalancer_id, pool, bigips):
    """Workaround for the F5 TMSH / AS3 bug tracked as 527004 /
    AUTOTOOL-1469: a custom monitor is noted as in-use and cannot be
    removed. The workaround tries to disassociate the monitor manually,
    without transactions, via the iControl REST API.

    :param network_id: network id of the loadbalancer
    :param loadbalancer_id: loadbalancer id
    :param pool: octavia pool object
    :param bigips: bigips
    """
    if pool.health_monitor and driver_utils.pending_delete(
            pool.health_monitor):
        LOG.info("Disassociating health-monitor '%s'",
                 pool.health_monitor.id)
        for bigip in bigips:
            try:
                pool_resource_path = '{pool_path}/~{net_id}~{lb_id}~{pool_id}'.format(
                    pool_path=F5_POOL_PATH,
                    net_id=m_tenant.get_name(network_id),
                    lb_id=m_app.get_name(loadbalancer_id),
                    pool_id=m_pool.get_name(pool.id))

                pool_json = bigip.get(pool_resource_path)
                if pool_json.ok:
                    pool_dict = pool_json.json()
                    if 'monitor' in pool_dict:
                        pool_dict['monitor'] = None
                        bigip.put(pool_resource_path, json=pool_dict)
                else:
                    LOG.warning("Disassociating health-monitor '%s' "
                                "failed: %s", pool.health_monitor.id,
                                pool_json.text)
            except exceptions.AS3Exception as e:
                LOG.warning("Disassociating health-monitor '%s' failed: %s",
                            pool.health_monitor.id, e)
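# For illustration: assuming F5_POOL_PATH is '/mgmt/tm/ltm/pool' and the
# naming helpers return prefixed ids such as 'net-<id>', 'lb-<id>' and
# 'pool-<id>' (all assumptions), the resource path built above renders as:
#
#   /mgmt/tm/ltm/pool/~net-<network_id>~lb-<loadbalancer_id>~pool-<pool_id>
#
# The '~' separators are the iControl REST encoding of '/' in fully
# qualified object names (partition/folder/object).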
def _set_deleted_or_active(self, obj):
    """Set octavia object to DELETED if its status was PENDING_DELETE,
    else to ACTIVE.

    :param obj: octavia object
    """
    if utils.pending_delete(obj):
        self.set_deleted(obj)
    else:
        self.set_active(obj)
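# A plausible sketch of the pending_delete helper used throughout these
# functions; the real implementation lives in the shared driver utils and
# may also treat DELETED as "in deletion" (assumption).
def _sketch_pending_delete(obj):
    return obj.provisioning_status == lib_consts.PENDING_DELETE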
def get_pool(pool):
    """Map an Octavia pool to an AS3 Pool object.

    :param pool: octavia pool object
    :return: AS3 pool entities
    """
    # entities is a list of tuples, each describing an AS3 object; the
    # objects may reference each other but do not form a hierarchy.
    entities = []
    lbaas_lb_method = pool.lb_algorithm.upper()
    lbmode = _set_lb_method(lbaas_lb_method, pool.members)
    service_args = {
        'label': as3types.f5label(pool.name or pool.id),
        'remark': as3types.f5remark(pool.description or pool.id),
        'loadBalancingMode': lbmode,
        'members': [],
    }

    for member in pool.members:
        # Ignore backup members, they will be handled by the service
        if not utils.pending_delete(member) and not member.backup:
            service_args['members'].append(m_member.get_member(member))

    # if pool.health_monitor and not utils.pending_delete(
    #         pool.health_monitor):
    # Workaround for the monitor deletion bug in AS3: dereference the
    # health monitor but keep the monitor object itself around.
    if pool.health_monitor:
        hms = m_monitor.get_monitors(pool.health_monitor, pool.members)
        entities.extend(hms)

        # Part of the workaround
        if not utils.pending_delete(pool.health_monitor):
            service_args['monitors'] = [Pointer(use=name)
                                        for name, _ in hms]

    entities.append((get_name(pool.id), Pool(**service_args)))
    return entities
def get_monitors(health_monitor, members):
    # Check whether all members share the same custom address/port, in
    # which case we can just adapt the single pool monitor.
    monitor_addresses = [member.monitor_address for member in members
                         if member.monitor_address is not None]
    monitor_ports = [member.monitor_port for member in members
                     if member.monitor_port is not None]

    ref_addr = None
    ref_port = None
    if monitor_addresses:
        ref_addr = monitor_addresses[0]
    if monitor_ports:
        ref_port = monitor_ports[0]
    if (all(x == ref_addr for x in monitor_addresses) and
            all(x == ref_port for x in monitor_ports)):
        return [(get_name(health_monitor.id),
                 get_monitor(health_monitor, ref_addr, ref_port))]

    # Create the standard health monitor without custom addresses/ports
    entities = [(get_name(health_monitor.id), get_monitor(health_monitor))]

    # Create additional custom health monitors with custom addresses/ports
    for member in members:
        # Custom member address/port
        if not utils.pending_delete(member) and (member.monitor_address or
                                                 member.monitor_port):
            member_hm = get_monitor(health_monitor, member.monitor_address,
                                    member.monitor_port)
            name = get_name(member.id)
            entities.append((name, member_hm))

    return entities
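# Return shapes of get_monitors, for illustration (names are placeholders):
#
#   All members share one custom address/port -> a single adapted monitor:
#       [('<hm-name>', <monitor probing 10.0.0.5:8080>)]
#
#   Mixed overrides -> the standard monitor plus one per overriding member:
#       [('<hm-name>', <standard monitor>),
#        ('<member-name>', <monitor probing that member's overrides>)]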
def _set_lb_method(lbaas_lb_method, members):
    """Set pool lb method depending on member attributes.

    :param lbaas_lb_method: octavia loadbalancing method
    :param members: octavia members
    :return: F5 load balancing method
    """
    lb_method = _get_lb_method(lbaas_lb_method)
    if lb_method == 'SOURCE_IP':
        return lb_method

    member_has_weight = False
    for member in members:
        if not utils.pending_delete(member) and member.weight > 1:
            member_has_weight = True
            break

    if member_has_weight:
        if lb_method == 'LEAST_CONNECTIONS':
            return _get_lb_method('RATIO_LEAST_CONNECTIONS')
        return _get_lb_method('RATIO')
    return lb_method
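# A hypothetical sketch of the _get_lb_method helper referenced above. The
# real mapping lives elsewhere in this module; the AS3 mode strings below
# are assumptions for illustration only.
_SKETCH_LB_METHOD_MAP = {
    'ROUND_ROBIN': 'round-robin',
    'LEAST_CONNECTIONS': 'least-connections-member',
    'RATIO_LEAST_CONNECTIONS': 'ratio-least-connections-member',
    'RATIO': 'ratio-member',
    'SOURCE_IP': 'SOURCE_IP',  # kept verbatim so the caller's check matches
}

def _sketch_get_lb_method(lbaas_lb_method):
    # Fall back to round-robin for unknown algorithms (assumption).
    return _SKETCH_LB_METHOD_MAP.get(lbaas_lb_method, 'round-robin')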
def status_dict(self, obj, cascade=False):
    """Return a list with a single status dict for an octavia object:
    DELETED if its status was PENDING_DELETE, else ACTIVE. Objects in
    ERROR state, and objects that are already ACTIVE, are left untouched
    (an empty list is returned).

    :param obj: octavia object
    """
    # Cascade delete: force deleted
    if cascade:
        return [self._status_obj(obj, lib_consts.DELETED)]

    # Don't update errored objects
    if obj.provisioning_status == lib_consts.ERROR:
        return []

    # Don't update already active objects
    if obj.provisioning_status == lib_consts.ACTIVE:
        return []

    if utils.pending_delete(obj):
        return [self._status_obj(obj, lib_consts.DELETED)]
    else:
        return [self._status_obj(obj, lib_consts.ACTIVE)]
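# A plausible sketch of the _status_obj helper used above. Octavia provider
# drivers report status as {'id': ..., 'provisioning_status': ...} dicts;
# the exact helper body is an assumption.
def _sketch_status_obj(self, obj, provisioning_status):
    return {'id': obj.id, 'provisioning_status': provisioning_status}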