def workaround_autotool_1469(network_id, loadbalancer_id, pool, bigips):
    """Manually disassociate a pool's health monitor via iControl REST.

    This is a workaround for F5 TMSH / AS3 Bug tracked as 527004 /
    AUTOTOOL-1469 -> Custom Monitor noted as in-use and cannot be removed.
    Workaround tries to unassociate monitor manually and without
    transactions via iControl REST API.

    :param network_id: neutron network id, used to derive the tenant name
    :param loadbalancer_id: octavia load balancer id, used to derive the
        application name
    :param pool: octavia pool object whose health monitor may be pending
        deletion
    :param bigips: iterable of bigip connection instances to patch
    """
    # Only act when the pool has a monitor that is actually being deleted.
    if pool.health_monitor and driver_utils.pending_delete(
            pool.health_monitor):
        LOG.info("Disassociating health-monitor '%s'", pool.health_monitor.id)
        for bigip in bigips:
            try:
                # iControl REST path of the pool:
                # <F5_POOL_PATH>/~<tenant>~<application>~<pool>
                pool_resource_path = '{pool_path}/~{net_id}~{lb_id}~{pool_id}'.format(
                    pool_path=F5_POOL_PATH,
                    net_id=m_tenant.get_name(network_id),
                    lb_id=m_app.get_name(loadbalancer_id),
                    pool_id=m_pool.get_name(pool.id))

                # Fetch the pool and PUT it back with the monitor
                # reference cleared.
                pool_json = bigip.get(pool_resource_path)
                if pool_json.ok:
                    pool_dict = pool_json.json()
                    if 'monitor' in pool_dict:
                        pool_dict['monitor'] = None
                        bigip.put(pool_resource_path, json=pool_dict)
                else:
                    LOG.warning(
                        "Disassociating health-monitor '%s' failed: %s",
                        pool.health_monitor.id, pool_json.text)
            except exceptions.AS3Exception as e:
                # Best-effort workaround: log and continue with the next
                # bigip instead of failing the whole operation.
                LOG.warning("Disassociating health-monitor '%s' failed: %s",
                            pool.health_monitor.id, e)
def json2AS3Obj(jdata, network_id, segmentation_id):
    """Build an AS3 declaration from a JSON-serialized load balancer.

    :param jdata: JSON string with a top-level 'loadbalancer' object
    :param network_id: neutron network id, used to derive the tenant name
    :param segmentation_id: VLAN segmentation id, used as the tenant's
        default route domain
    :return: the AS3 declaration as a JSON string, or None when the load
        balancer is deleted or pending deletion
    """
    decl = AS3(persist=True, action='deploy', _log_level=LOG.logger.level)
    adc = ADC(id="urn:uuid:{}".format(uuid.uuid4()),
              label="F5 BigIP Octavia Provider")
    decl.set_adc(adc)

    def dict_decoder(mydict):
        # Expose parsed JSON objects via attribute access.
        return namedtuple('LBObj', mydict.keys())(*mydict.values())

    lb = json.loads(jdata, object_hook=dict_decoder)
    loadbalancer = lb.loadbalancer
    project_id = loadbalancer.project_id
    tenant = adc.get_or_create_tenant(
        m_part.get_name(network_id),
        defaultRouteDomain=segmentation_id,
        label='{}{}'.format(constants.PREFIX_PROJECT, project_id or 'none'))

    # Skip load balancer in (pending) deletion
    if loadbalancer.provisioning_status in [
            constants.PENDING_DELETE, constants.DELETED]:
        return None

    # Create generic application
    app = Application(constants.APPLICATION_GENERIC, label=loadbalancer.id)

    # Attach Octavia listeners as AS3 service objects
    for listener in loadbalancer.listeners:
        if not driver_utils.pending_delete(listener):
            try:
                service_entities = m_service.get_service(
                    loadbalancer, listener)
                app.add_entities(service_entities)
            except o_exceptions.CertificateRetrievalException as e:
                LOG.error(
                    "Could not retrieve certificate, skipping listener '%s': %s",
                    listener.id, e)

    # Attach pools
    for pool in loadbalancer.pools:
        if not driver_utils.pending_delete(pool):
            app.add_entities(m_pool.get_pool(pool))

    # Attach newly created application
    tenant.add_application(m_app.get_name(loadbalancer.id), app)
    return decl.to_json()
def get_tenant(segmentation_id, loadbalancers, status_manager, cert_manager,
               esd_repo):
    """Build an AS3 Tenant containing one application per load balancer.

    :param segmentation_id: VLAN segmentation id used as the tenant's
        default route domain (may be falsy, in which case the tenant gets
        no label/route-domain)
    :param loadbalancers: octavia load balancers belonging to this tenant
    :param status_manager: status manager instance or None; used to flag
        listeners whose certificates were deleted from the keystore
    :param cert_manager: certificate manager wrapper instance
    :param esd_repo: ESD repository instance
    :return: populated as3.Tenant object
    :raises o_exceptions.CertificateRetrievalException: re-raised when the
        keystore is unreachable (any status code other than 400)
    """
    project_id = None
    if loadbalancers:
        project_id = loadbalancers[-1].project_id

    tenant_dict = {}
    if segmentation_id:
        tenant_dict['label'] = '{}{}'.format(constants.PREFIX_PROJECT,
                                             project_id or 'none')
        tenant_dict['defaultRouteDomain'] = segmentation_id
    tenant = as3.Tenant(**tenant_dict)

    # Skip members that re-use load balancer vips
    loadbalancer_ips = [
        load_balancer.vip.ip_address for load_balancer in loadbalancers
        if not driver_utils.pending_delete(load_balancer)
    ]

    for loadbalancer in loadbalancers:
        # Skip load balancer in (pending) deletion
        if loadbalancer.provisioning_status in [constants.PENDING_DELETE]:
            continue

        # Create generic application
        app = Application(constants.APPLICATION_GENERIC,
                          label=loadbalancer.id)

        # Attach Octavia listeners as AS3 service objects
        for listener in loadbalancer.listeners:
            if not driver_utils.pending_delete(listener):
                try:
                    service_entities = m_service.get_service(
                        listener, cert_manager, esd_repo)
                    app.add_entities(service_entities)
                except o_exceptions.CertificateRetrievalException as e:
                    if getattr(e, 'status_code', 0) != 400:
                        # Error connecting to keystore, skip tenant update.
                        # Bare raise preserves the original traceback
                        # (raise e would rewrite it).
                        raise
                    LOG.error(
                        "Could not retrieve certificate, assuming it is deleted, skipping listener '%s': %s",
                        listener.id, e)
                    if status_manager:
                        # Key / Container not found in keystore
                        status_manager.set_error(listener)

        # Attach pools
        for pool in loadbalancer.pools:
            if not driver_utils.pending_delete(pool):
                app.add_entities(
                    m_pool.get_pool(pool, loadbalancer_ips, status_manager))

        # Attach newly created application
        tenant.add_application(m_app.get_name(loadbalancer.id), app)

    return tenant
def member_create(bigip, member):
    """Patches new member into existing pool

    :param bigip: bigip instance
    :param member: octavia member object
    :return: requests patch result
    """
    pool = member.pool
    load_balancer = pool.load_balancer
    # AS3 pointer: <partition>/<application>/<pool>/members/- ("-" appends)
    member_path = '/'.join([
        m_part.get_name(load_balancer.vip.network_id),
        m_app.get_name(load_balancer.id),
        m_pool.get_name(pool.id),
        'members',
        '-',
    ])
    member_value = m_member.get_member(member).to_dict()
    return bigip.patch(operation='add', path=member_path, value=member_value)
def tenant_update(bigip, cert_manager, tenant, loadbalancers, segmentation_id):
    """Task to update F5s with all specified loadbalancers' configurations
    of a tenant (project).

    :param bigip: bigip instance
    :param cert_manager: CertManagerWrapper instance
    :param tenant: tenant_id/project_id
    :param loadbalancers: loadbalancers to update
    :param segmentation_id: segmentation_id of the loadbalancers
    :return: requests post result
    """
    action = 'deploy'
    if CONF.f5_agent.dry_run:
        action = 'dry-run'
    decl = AS3(persist=True,
               action=action,
               syncToGroup=CONF.f5_agent.sync_to_group,
               _log_level=LOG.logger.level)
    adc = ADC(id="urn:uuid:{}".format(uuid.uuid4()),
              label="F5 BigIP Octavia Provider")
    decl.set_adc(adc)

    # Distinct local name: do not shadow the tenant_id parameter with the
    # AS3 tenant object.
    as3_tenant = adc.get_or_create_tenant(m_part.get_name(tenant),
                                          defaultRouteDomain=segmentation_id)

    for loadbalancer in loadbalancers:
        if utils.pending_delete(loadbalancer):
            continue

        # Create generic application
        app = Application(constants.APPLICATION_GENERIC,
                          label=loadbalancer.id)

        # Attach Octavia listeners as AS3 service objects
        for listener in loadbalancer.listeners:
            if not utils.pending_delete(listener):
                service_entities = m_service.get_service(
                    listener, cert_manager, bigip.esd)
                app.add_entities(service_entities)

        # Attach pools
        for pool in loadbalancer.pools:
            if not utils.pending_delete(pool):
                app.add_entities(m_pool.get_pool(pool))

        # Attach newly created application
        as3_tenant.add_application(m_app.get_name(loadbalancer.id), app)

    return bigip.post(json=decl.to_json())
def get_service(listener, cert_manager, esd_repository):
    """ Map Octavia listener -> AS3 Service

    :param listener: Octavia listener
    :param cert_manager: cert_manager wrapper instance
    :param esd_repository: ESD repository used to resolve listener tags and
        (deprecated) L7 policy names to ESDs
    :return: list of (name, entity) tuples: the AS3 Service plus additional
        AS3 application objects it references
    """
    # Entities is a list of tuples, which each describe AS3 objects
    # which may reference each other but do not form a hierarchy.
    entities = []
    vip = listener.load_balancer.vip
    project_id = listener.load_balancer.project_id
    label = as3types.f5label(listener.name or listener.description)
    virtual_address = '{}/32'.format(vip.ip_address)
    service_args = {
        'virtualPort': listener.protocol_port,
        'persistenceMethods': [],
        'iRules': [],
        'policyEndpoint': [],
        'label': label
    }

    # Custom virtual address settings
    if CONF.f5_agent.service_address_icmp_echo:
        service_address = as3.ServiceAddress(
            virtualAddress=virtual_address,
            icmpEcho=CONF.f5_agent.service_address_icmp_echo)
        entities.append(
            (m_app.get_name(listener.load_balancer.id), service_address))
        service_args['virtualAddresses'] = [[
            as3.Pointer(m_app.get_name(listener.load_balancer.id)),
            virtual_address
        ]]
    else:
        service_args['virtualAddresses'] = [virtual_address]

    # Determine service type
    if listener.protocol == const.PROTOCOL_TCP:
        service_args['_servicetype'] = CONF.f5_agent.tcp_service_type
    # UDP
    elif listener.protocol == const.PROTOCOL_UDP:
        service_args['_servicetype'] = const.SERVICE_UDP
    # HTTP
    elif listener.protocol == const.PROTOCOL_HTTP:
        service_args['_servicetype'] = const.SERVICE_HTTP
    # HTTPS (non-terminated, forward TCP traffic)
    elif listener.protocol == const.PROTOCOL_HTTPS:
        service_args['_servicetype'] = CONF.f5_agent.tcp_service_type
    # Proxy
    elif listener.protocol == const.PROTOCOL_PROXY:
        service_args['_servicetype'] = const.SERVICE_TCP
        name, irule = m_irule.get_proxy_irule()
        service_args['iRules'].append(name)
        entities.append((name, irule))
    # Terminated HTTPS
    elif listener.protocol == const.PROTOCOL_TERMINATED_HTTPS:
        service_args['_servicetype'] = const.SERVICE_HTTPS
        service_args['serverTLS'] = m_tls.get_listener_name(listener.id)
        service_args['redirect80'] = False

        # Certificate Handling
        auth_name = None
        certificates = cert_manager.get_certificates(listener)
        if listener.client_ca_tls_certificate_id and \
                listener.client_authentication != 'NONE':
            # Client Side Certificates
            try:
                auth_name, secret = cert_manager.load_secret(
                    project_id, listener.client_ca_tls_certificate_id)
                entities.append(
                    (auth_name,
                     m_cert.get_ca_bundle(secret, auth_name, auth_name)))
            except exceptions.CertificateRetrievalException as e:
                LOG.error("Error fetching certificate: %s", e)

        entities.append(
            (m_tls.get_listener_name(listener.id),
             m_tls.get_tls_server([cert['id'] for cert in certificates],
                                  auth_name,
                                  listener.client_authentication)))
        entities.extend([(cert['id'], cert['as3']) for cert in certificates])

    if listener.connection_limit > 0:
        service_args['maxConnections'] = listener.connection_limit

    # Add default pool
    if listener.default_pool_id:
        pool = listener.default_pool
        if pool.provisioning_status != lib_consts.PENDING_DELETE:
            default_pool = m_pool.get_name(listener.default_pool_id)
            service_args['pool'] = default_pool

            # only consider Proxy pool, everything else is determined by
            # listener type
            if pool.protocol == const.PROTOCOL_PROXY:
                name, irule = m_irule.get_proxy_irule()
                service_args['iRules'].append(name)
                entities.append((name, irule))

            # Pool member certificate handling (TLS backends)
            if pool.tls_enabled and listener.protocol in [
                    const.PROTOCOL_PROXY, const.PROTOCOL_HTTP,
                    const.PROTOCOL_TERMINATED_HTTPS]:
                client_cert = None
                trust_ca = None
                crl_file = None

                service_args['clientTLS'] = m_tls.get_pool_name(pool.id)
                certificates = cert_manager.get_certificates(pool)
                if len(certificates) == 1:
                    cert = certificates.pop()
                    entities.append((cert['id'], cert['as3']))
                    client_cert = cert['id']

                if pool.ca_tls_certificate_id:
                    trust_ca, secret = cert_manager.load_secret(
                        project_id, pool.ca_tls_certificate_id)
                    entities.append(
                        (trust_ca,
                         m_cert.get_ca_bundle(secret, trust_ca, trust_ca)))

                if pool.crl_container_id:
                    # TODO: CRL currently not supported
                    pass

                entities.append(
                    (m_tls.get_pool_name(pool.id),
                     m_tls.get_tls_client(trust_ca=trust_ca,
                                          client_cert=client_cert,
                                          crl_file=crl_file)))

    # Insert header irules
    if service_args['_servicetype'] in const.SERVICE_HTTP_TYPES:
        # HTTP profiles only
        for name, irule in m_irule.get_header_irules(listener.insert_headers):
            service_args['iRules'].append(name)
            entities.append((name, irule))

    # session persistence
    if listener.default_pool_id and listener.default_pool.session_persistence:
        persistence = listener.default_pool.session_persistence
        lb_algorithm = listener.default_pool.lb_algorithm

        if service_args['_servicetype'] in const.SERVICE_HTTP_TYPES:
            # Add APP_COOKIE / HTTP_COOKIE persistence only in HTTP profiles
            if persistence.type == 'APP_COOKIE' and persistence.cookie_name:
                # generate iRule for cookie_name.
                # FIX: str.replace returns a new string (strings are
                # immutable); the previous code discarded the result,
                # leaving quote characters in the cookie name.
                escaped_cookie = persistence.cookie_name.replace("\"", "")
                irule_name, irule = m_irule.get_app_cookie_irule(
                    escaped_cookie)
                entities.append((irule_name, irule))

                # add iRule to universal persistence profile
                name, obj_persist = m_persist.get_app_cookie(escaped_cookie)
                service_args['persistenceMethods'] = [as3.Pointer(name)]
                entities.append((name, obj_persist))
                if lb_algorithm == 'SOURCE_IP':
                    service_args['fallbackPersistenceMethod'] = \
                        'source-address'
            elif persistence.type == 'HTTP_COOKIE':
                service_args['persistenceMethods'] = ['cookie']
                if lb_algorithm == 'SOURCE_IP':
                    service_args['fallbackPersistenceMethod'] = \
                        'source-address'

        if persistence.type == 'SOURCE_IP':
            if not persistence.persistence_timeout and \
                    not persistence.persistence_granularity:
                service_args['persistenceMethods'] = ['source-address']
            else:
                name, obj_persist = m_persist.get_source_ip(
                    persistence.persistence_timeout,
                    persistence.persistence_granularity)
                service_args['persistenceMethods'] = [as3.Pointer(name)]
                entities.append((name, obj_persist))

    # Map listener tags to ESDs
    for tag in listener.tags:
        # get ESD of same name
        esd = esd_repository.get_esd(tag)
        if esd is None:
            continue

        # enrich service with iRules and other things defined in ESD
        esd_entities = get_esd_entities(service_args['_servicetype'], esd)
        for entity_name in esd_entities:
            if entity_name == 'iRules':
                service_args['iRules'].extend(esd_entities['iRules'])
            else:
                service_args[entity_name] = esd_entities[entity_name]

    endpoint_policies = []
    # Map special L7policies to ESDs
    # TODO: Remove this as soon as all customers have migrated their scripts.
    # Triggering ESDs via L7policies is considered deprecated. Tags should be
    # used instead. See the code above.
    for policy in listener.l7policies:
        # get ESD of same name
        esd = esd_repository.get_esd(policy.name)

        # Add ESD or regular endpoint policy
        if esd:
            # enrich service with iRules and other things defined in ESD
            esd_entities = get_esd_entities(service_args['_servicetype'], esd)
            for entity_name in esd_entities:
                if entity_name == 'iRules':
                    service_args['iRules'].extend(esd_entities['iRules'])
                else:
                    service_args[entity_name] = esd_entities[entity_name]
        elif policy.provisioning_status != lib_consts.PENDING_DELETE:
            endpoint_policies.append(policy)

    # UDP listener won't support policies
    if endpoint_policies and \
            service_args['_servicetype'] != const.SERVICE_UDP:
        # add a regular endpoint policy
        policy_name = m_policy.get_wrapper_name(listener.id)

        # make endpoint policy object
        endpoint_policy = (policy_name,
                           m_policy.get_endpoint_policy(endpoint_policies))
        entities.append(endpoint_policy)

        # reference endpoint policy object in service
        service_args['policyEndpoint'].append(policy_name)

    # Ensure no duplicate iRules; dict.fromkeys preserves insertion order,
    # so the generated declaration is deterministic (plain set() is not).
    service_args['iRules'] = list(dict.fromkeys(service_args['iRules']))

    # fastL4 profile doesn't support iRules, fallback to TCP Profile when
    # iRules detected
    if service_args['_servicetype'] == const.SERVICE_L4 and \
            service_args['iRules']:
        service_args['_servicetype'] = const.SERVICE_TCP

    # add default profiles to supported listeners
    if CONF.f5_agent.profile_http and \
            service_args['_servicetype'] in const.SERVICE_HTTP_TYPES:
        if 'profileHTTP' not in service_args:
            service_args['profileHTTP'] = as3.BigIP(
                CONF.f5_agent.profile_http)
    if CONF.f5_agent.profile_l4 and \
            service_args['_servicetype'] == const.SERVICE_L4:
        if 'profileL4' not in service_args:
            service_args['profileL4'] = as3.BigIP(CONF.f5_agent.profile_l4)
    if CONF.f5_agent.profile_tcp and \
            service_args['_servicetype'] in const.SERVICE_TCP_TYPES:
        if 'profileTCP' not in service_args:
            service_args['profileTCP'] = as3.BigIP(CONF.f5_agent.profile_tcp)
    if CONF.f5_agent.profile_udp and \
            service_args['_servicetype'] == const.SERVICE_UDP:
        if 'profileUDP' not in service_args:
            service_args['profileUDP'] = as3.BigIP(CONF.f5_agent.profile_udp)

    # Use the virtual-server address as SNAT address
    if CONF.f5_agent.snat_virtual:
        service_args['snat'] = 'self'

    # create service object and fill in additional fields
    service = as3.Service(**service_args)

    # add service to entities and return
    entities.append((get_name(listener.id), service))
    return entities