Ejemplo n.º 1
0
 def purge_orphaned_folders_contents(self, known_folders, bigip=None):
     """ Purge Folder of contents

     Removes the contents of every managed folder that is not listed
     in known_folders. Default folders ('/', 'Common'), folders not
     carrying this object's prefix and iapp ('.app') folders are left
     alone.

     :param known_folders: iterable of undecorated folder names to keep
     :param bigip: optional connection to use; defaults to self.bigip
     """
     if not bigip:
         bigip = self.bigip
     existing_folders = bigip.system.get_folders()
     # remove all folders which are default
     existing_folders.remove('/')
     existing_folders.remove('Common')
     # remove all folders which are not managed
     # with this object prefix.
     # copy the list so we are not modifying it while traversing it.
     existing_folders_copy = list(existing_folders)
     for folder in existing_folders_copy:
         if not folder.startswith(self.OBJ_PREFIX):
             existing_folders.remove(folder)
         # iapp folders need to be purged by removing the iapp.
         # elif prevents a double remove (ValueError) when a folder
         # both lacks the prefix and ends with '.app'.
         elif folder.endswith('.app'):
             existing_folders.remove(folder)
     for folder in known_folders:
         decorated_folder = bigip.decorate_folder(folder)
         if decorated_folder in existing_folders:
             existing_folders.remove(decorated_folder)
     # anything left should be purged
     if existing_folders:
         Log.debug('system',
                   'purging orphaned folders contents: %s'
                   % existing_folders)
     for folder in existing_folders:
         try:
             bigip.system.purge_folder_contents(folder, bigip)
         except Exception as exc:
             Log.error('purge_orphaned_folders_contents', exc.message)
Ejemplo n.º 2
0
 def purge_orphaned_folders(self, known_folders, bigip=None):
     """ Purge Folders

     Deletes every managed folder that is not listed in known_folders.
     Default folders ('/', 'Common'), folders not carrying this
     object's prefix and iapp ('.app') folders are left alone.

     :param known_folders: iterable of undecorated folder names to keep
     :param bigip: optional connection to use; defaults to self.bigip
     """
     if not bigip:
         bigip = self.bigip
     existing_folders = bigip.system.get_folders()
     # remove all folders which are default
     existing_folders.remove('/')
     existing_folders.remove('Common')
     # remove all folders which are not managed
     # with this object prefix
     existing_folders_copy = list(existing_folders)
     for folder in existing_folders_copy:
         if not folder.startswith(self.OBJ_PREFIX):
             existing_folders.remove(folder)
         # iapp folders need to be purged by removing the iapp.
         # elif prevents a double remove (ValueError) when a folder
         # both lacks the prefix and ends with '.app'.
         elif folder.endswith('.app'):
             existing_folders.remove(folder)
     for folder in known_folders:
         decorated_folder = bigip.decorate_folder(folder)
         if decorated_folder in existing_folders:
             existing_folders.remove(decorated_folder)
     # anything left should be purged
     if existing_folders:
         Log.debug('system', 'purging orphaned folders: %s'
                   % existing_folders)
     for folder in existing_folders:
         try:
             bigip.system.purge_folder(folder, bigip)
         except Exception as exc:
             Log.error('purge_orphaned_folders', exc.message)
Ejemplo n.º 3
0
 def get_arps(self, ip_address=None, folder="Common"):
     """Fetch static ARP entries via the iControl REST API.

     With *ip_address*, returns a one-element list for that entry;
     otherwise returns one dict per entry in *folder*. Each dict maps
     the route-domain-stripped entry name to its MAC address.
     Raises StaticARPQueryException on an HTTP error response.
     """
     folder = str(folder).replace("/", "")
     session = self.bigip.icr_session
     if ip_address:
         quoted = urllib.quote(self._remove_route_domain_zero(ip_address))
         url = self.bigip.icr_url + "/net/arp/" + "~" + folder + "~" + quoted
         response = session.get(url, timeout=const.CONNECTION_TIMEOUT)
         Log.debug("ARP::get response", "%s" % response.json())
         if response.status_code >= 400:
             Log.error("ARP", response.text)
             raise exceptions.StaticARPQueryException(response.text)
         entry = json.loads(response.text)
         return [{strip_domain_address(entry["name"]): entry["macAddress"]}]
     url = (self.bigip.icr_url + "/net/arp" +
            "?$filter=" + "partition eq " + folder)
     response = session.get(url, timeout=const.CONNECTION_TIMEOUT)
     Log.debug("ARP::get response", "%s" % response.json())
     if response.status_code >= 400:
         Log.error("ARP", response.text)
         raise exceptions.StaticARPQueryException(response.text)
     listing = json.loads(response.text)
     if "items" in listing:
         return [{strip_domain_address(arp["name"]): arp["macAddress"]}
                 for arp in listing["items"]]
     return []
 def set_persist_profile(self, name=None, profile_name=None,
                             folder='Common'):
     """Replace all persistence profiles on a virtual server with one.

     Removes every persistence profile from virtual server *name*,
     then adds *profile_name* as the default persistence profile.

     :returns: True on success; False when the virtual server does not
         exist or the profile is already set on the partition.
     :raises WebFault: for any iControl fault other than the profile
         already existing.
     """
     if not self.exists(name=name, folder=folder):
         return False
     Log.debug('VirtualServer', 'resetting persistence.')
     self.lb_vs.remove_all_persistence_profiles([name])
     if profile_name.startswith('/Common'):
         profile_name = strip_folder_and_prefix(profile_name)
     try:
         vsp = self.lb_vs.typefactory.create(
             'LocalLB.VirtualServer.VirtualServerPersistence')
         vsp.profile_name = profile_name
         vsp.default_profile = True
         vsp_seq = self.lb_vs.typefactory.create(
             'LocalLB.VirtualServer.VirtualServerPersistenceSequence')
         vsp_seq.values = [vsp]
         vsp_seq_seq = self.lb_vs.typefactory.create(
             'LocalLB.VirtualServer.VirtualServerPersistenceSequenceSequence')
         vsp_seq_seq.values = [vsp_seq]
         Log.debug('VirtualServer', 'adding persistence %s'
                   % profile_name)
         self.lb_vs.add_persistence_profile([name], vsp_seq_seq)
         return True
     except WebFault as wf:
         # The original returned False for every fault and had an
         # unreachable try/else that re-raised the out-of-scope name
         # `wf` (a NameError if ever reached). Only the expected
         # "already exists" fault is handled; anything else propagates.
         if "already exists in partition" in str(wf.message):
             Log.error('VirtualServer',
                       'tried to set source_addr persistence when exists')
             return False
         raise
 def add_profile(self, name=None, profile_name=None,
                 client_context=True, server_context=True,
                 folder='Common'):
     """Attach a profile to a virtual server for the requested context.

     Returns True when the profile was added, False when the virtual
     server already carries it for that context.
     """
     if profile_name.startswith("/Common"):
         profile_name = strip_folder_and_prefix(profile_name)
     Log.debug('VirtualServer', 'Does the following profile exist? %s %s'
               % (name, profile_name))
     already_present = self.virtual_server_has_profile(
         name=name,
         profile_name=profile_name,
         client_context=client_context,
         server_context=server_context,
         folder=folder)
     if already_present:
         return False
     # Map the (client, server) flags onto the iControl context enum;
     # anything other than exactly-one-side means "all".
     if client_context and not server_context:
         context = 'PROFILE_CONTEXT_TYPE_CLIENT'
     elif not client_context and server_context:
         context = 'PROFILE_CONTEXT_TYPE_SERVER'
     else:
         context = 'PROFILE_CONTEXT_TYPE_ALL'
     factory = self.lb_vs.typefactory
     vs_profile = factory.create(
         'LocalLB.VirtualServer.VirtualServerProfile')
     vs_profile.profile_name = profile_name
     vs_profile.profile_context = context
     profile_seq = factory.create(
         'LocalLB.VirtualServer.VirtualServerProfileSequence')
     profile_seq.values = [vs_profile]
     profile_seq_seq = factory.create(
         'LocalLB.VirtualServer.VirtualServerProfileSequenceSequence')
     profile_seq_seq.values = [profile_seq]
     self.lb_vs.add_profile([name], profile_seq_seq)
     return True
Ejemplo n.º 6
0
 def create_multipoint_tunnel(self, name=None,
                              profile_name=None,
                              self_ip_address=None,
                              greid=0,
                              description=None,
                              folder='Common',
                              route_domain_id=0):
     """ Create multipoint tunnel

     Creates an L2GRE multipoint tunnel unless one of that name already
     exists. Returns True when created, False when it already exists.
     Raises L2GRETunnelCreationException on an HTTP error.
     """
     if self.tunnel_exists(name=name, folder=folder):
         return False
     folder = str(folder).replace('/', '')
     payload = {
         'name': name,
         'partition': folder,
         'profile': profile_name,
         'key': greid,
         'localAddress': self_ip_address,
         'remoteAddress': '0.0.0.0',
     }
     if description:
         payload['description'] = description
     request_url = self.bigip.icr_url + '/net/tunnels/tunnel/'
     Log.debug('L2GRE', 'creating tunnel with %s' % json.dumps(payload))
     response = self.bigip.icr_session.post(
         request_url, data=json.dumps(payload),
         timeout=const.CONNECTION_TIMEOUT)
     if response.status_code >= 400:
         Log.error('L2GRE', response.text)
         raise exceptions.L2GRETunnelCreationException(response.text)
     # non-Common tunnels must be attached to their route domain
     if folder != 'Common':
         self.bigip.route.add_vlan_to_domain_by_id(
             name=name, folder=folder,
             route_domain_id=route_domain_id)
     return True
Ejemplo n.º 7
0
 def purge_orphaned_folders(self, known_folders, bigip=None):
     """ Purge Folders

     Deletes every folder that carries this object's prefix but is not
     listed in known_folders. Default folders ('/', 'Common') are kept.

     :param known_folders: iterable of undecorated folder names to keep
     :param bigip: optional connection to use; defaults to self.bigip
     """
     if not bigip:
         bigip = self.bigip
     existing_folders = bigip.system.get_folders()
     # remove all folders which are default
     existing_folders.remove('/')
     existing_folders.remove('Common')
     # remove all folders which are not managed
     # with this object prefix.
     # iterate over a copy: removing from the list while iterating it
     # directly skips the element following each removal.
     for folder in list(existing_folders):
         if not folder.startswith(self.OBJ_PREFIX):
             existing_folders.remove(folder)
     for folder in known_folders:
         decorated_folder = bigip.decorate_folder(folder)
         if decorated_folder in existing_folders:
             existing_folders.remove(decorated_folder)
     # anything left should be purged
     if existing_folders:
         Log.debug('system',
                   'purging orphaned folders: %s' % existing_folders)
     for folder in existing_folders:
         try:
             bigip.system.purge_folder(folder, bigip)
         except Exception as exc:
             Log.error('purge_orphaned_folders', exc.message)
Ejemplo n.º 8
0
    def purge_orhpaned_pools(self, known_pools,
                              delete_virtual_server=True):
        """Delete pools carrying this prefix that are not in known_pools.

        Lists all pools, keeps only those with this object's prefix,
        drops the known ones, then deletes the remainder together with
        any virtual server (and its persistence profiles, rules and
        other profiles) pointing at them.

        :param known_pools: iterable of undecorated pool names to keep
        :param delete_virtual_server: unused here -- virtual servers
            are always deleted; kept for interface compatibility
        :raises exceptions.PoolQueryException: when the pool listing
            fails with a non-404 HTTP error
        """
        request_url = self.bigip.icr_url + '/ltm/pool'
        request_url += '?$select=name,partition'

        response = self.bigip.icr_session.get(request_url,
                                    timeout=const.CONNECTION_TIMEOUT)
        existing_pools = {}
        if response.status_code < 400:
            return_obj = json.loads(response.text)
            if 'items' in return_obj:
                for pool in return_obj['items']:
                    existing_pools[pool['name']] = pool['partition']
        elif response.status_code != 404:
            Log.error('pool', response.text)
            raise exceptions.PoolQueryException(response.text)

        Log.debug('pool', 'purging pools - existing : %s, known : %s'
                 % (existing_pools.keys(), known_pools))

        # remove all pools which are not managed
        # with this object prefix.
        # iterate over a snapshot of the keys: deleting entries from a
        # dict while iterating it raises RuntimeError.
        for pool in list(existing_pools):
            if not pool.startswith(self.OBJ_PREFIX):
                del existing_pools[pool]

        for pool in known_pools:
            decorated_pool = self.OBJ_PREFIX + pool
            if decorated_pool in existing_pools:
                del existing_pools[decorated_pool]
        # anything left should be purged
        for pool in existing_pools:
            vs_name = \
                 self.bigip.virtual_server.get_virtual_servers_by_pool_name(
                    pool_name=pool, folder=existing_pools[pool])
            if vs_name:
                try:
                    self.bigip.virtual_server.delete(
                        name=vs_name, folder=existing_pools[pool])
                    self.bigip.virtual_server.delete_persist_profile_like(
                        match=vs_name, folder=existing_pools[pool])
                    self.bigip.rule.delete_like(
                        match=vs_name, folder=existing_pools[pool])
                    self.bigip.virtual_server.delete_profile_like(
                        match=vs_name, folder=existing_pools[pool])
                except Exception as e:
                    Log.error('purge_orphaned_pools', e.message)
            try:
                self.delete(name=pool, folder=existing_pools[pool])
            except Exception as e:
                Log.error('purge_orphaned_pools', e.message)
Ejemplo n.º 9
0
 def get_tunnel_key(self, name=None, folder='Common'):
     """ Get tunnel key

     Returns the GRE key of the named tunnel, or None when the tunnel
     does not exist (HTTP 404).

     :raises exceptions.L2GRETunnelQueryException: on any other HTTP
         error. (The original constructed this exception without
         raising it, silently returning None.)
     """
     folder = str(folder).replace('/', '')
     request_url = self.bigip.icr_url + '/net/tunnels/tunnel/'
     request_url += '~' + folder + '~' + name
     response = self.bigip.icr_session.get(request_url,
                                           timeout=const.CONNECTION_TIMEOUT)
     if response.status_code < 400:
         return_obj = json.loads(response.text)
         # fix: interpolate the object instead of concatenating it
         # after a literal '%s'
         Log.debug('L2GRE', 'get_tunnel_key got %s' % str(return_obj))
         return return_obj['key']
     elif response.status_code != 404:
         Log.error('L2GRE', response.text)
         raise exceptions.L2GRETunnelQueryException(response.text)
     return None
Ejemplo n.º 10
0
 def get_tunnel_key(self, name=None, folder='Common'):
     """ Get tunnel key

     Returns the VXLAN key of the named tunnel, or None when the
     tunnel does not exist (HTTP 404).

     :raises exceptions.VXLANQueryException: on any other HTTP error.
         (The original constructed this exception without raising it,
         silently returning None.)
     """
     folder = str(folder).replace('/', '')
     request_url = self.bigip.icr_url + '/net/tunnels/tunnel/'
     request_url += '~' + folder + '~' + name
     response = self.bigip.icr_session.get(
         request_url, timeout=const.CONNECTION_TIMEOUT)
     if response.status_code < 400:
         return_obj = json.loads(response.text)
         # fix: interpolate the object instead of concatenating it
         # after a literal '%s'
         Log.debug('VXLAN', 'get_tunnel_key got %s' % str(return_obj))
         return return_obj['key']
     elif response.status_code != 404:
         Log.error('VXLAN', response.text)
         raise exceptions.VXLANQueryException(response.text)
     return None
Ejemplo n.º 11
0
    def delete_by_subnet(self, subnet=None, mask=None, folder='Common'):
        """Delete ARP entries whose address falls within *subnet*.

        *subnet* may embed the mask ('addr/bits'); otherwise *mask* is
        required. A route domain suffix after ':' is stripped before
        parsing (NOTE(review): ':' as the route-domain separator looks
        suspect for IPv6 addresses -- confirm the intended format).

        :returns: list of MAC addresses of the deleted entries; [] on
            missing or unparseable input.
        """
        if not subnet:
            return []
        mask_div = subnet.find('/')
        if mask_div > 0:
            try:
                rd_div = subnet.find(':')
                if rd_div > -1:
                    network = netaddr.IPNetwork(
                        subnet[0:mask_div][0:rd_div] + subnet[mask_div:])
                else:
                    network = netaddr.IPNetwork(subnet)
            except Exception as e:
                Log.error('ARP', e.message)
                return []
        elif not mask:
            return []
        else:
            try:
                rd_div = subnet.find(':')
                if rd_div > -1:
                    network = netaddr.IPNetwork(subnet[0:rd_div] + '/' +
                                                mask)
                else:
                    network = netaddr.IPNetwork(subnet + '/' + mask)
            except Exception as e:
                Log.error('ARP', e.message)
                return []

        mac_addresses = []
        if network:
            request_url = self.bigip.icr_url + '/net/arp'
            request_filter = 'partition eq ' + folder
            request_url += '?$filter=' + request_filter
            response = self.bigip.icr_session.get(
                request_url, timeout=const.CONNECTION_TIMEOUT)
            Log.debug('ARP::get response', '%s' % response.json())
            if response.status_code < 400:
                response_obj = json.loads(response.text)
                if 'items' in response_obj:
                    for arp in response_obj['items']:
                        # strip the '%<route domain>' suffix if present.
                        # The original sliced with find()'s -1 and so
                        # dropped the last character of addresses that
                        # carried no '%' suffix.
                        ip_str = arp['ipAddress']
                        ad_rd_div = ip_str.find('%')
                        if ad_rd_div > -1:
                            ip_str = ip_str[0:ad_rd_div]
                        address = netaddr.IPAddress(ip_str)
                        if address in network:
                            mac_addresses.append(arp['macAddress'])
                            self.delete(arp['ipAddress'],
                                        folder=arp['partition'])
        return mac_addresses
Ejemplo n.º 12
0
 def delete(self, name=None, folder='Common'):
     """Delete a SNAT translation address.

     Returns True when the address was deleted or did not exist,
     False when it is still referenced by a SNAT pool. Any other
     iControl fault is re-raised.
     """
     if not self.exists(name=name, folder=folder):
         # Odd logic compared to other delete: report success for a
         # missing address because other pools may depend on SNAT
         # addresses.
         return True
     try:
         self.lb_snataddress.delete_translation_address([name])
     except WebFault as wf:
         if "is still referenced by a snat pool" \
                                                    in str(wf.message):
             Log.debug('SNAT',
                      'Can not delete SNAT address %s ..still in use.'
                      % name)
             return False
         raise wf
     return True
Ejemplo n.º 13
0
    def delete_by_subnet(self, subnet=None, mask=None, folder="Common"):
        """Delete ARP entries whose address falls within *subnet*.

        *subnet* may embed the mask ('addr/bits'); otherwise *mask* is
        required. A route domain suffix after ':' is stripped before
        parsing (NOTE(review): ':' as the route-domain separator looks
        suspect for IPv6 addresses -- confirm the intended format).

        :returns: list of MAC addresses of the deleted entries; [] on
            missing or unparseable input.
        """
        if not subnet:
            return []
        mask_div = subnet.find("/")
        if mask_div > 0:
            try:
                rd_div = subnet.find(":")
                if rd_div > -1:
                    network = netaddr.IPNetwork(
                        subnet[0:mask_div][0:rd_div] + subnet[mask_div:])
                else:
                    network = netaddr.IPNetwork(subnet)
            except Exception as e:
                Log.error("ARP", e.message)
                return []
        elif not mask:
            return []
        else:
            try:
                rd_div = subnet.find(":")
                if rd_div > -1:
                    network = netaddr.IPNetwork(subnet[0:rd_div] + "/" + mask)
                else:
                    network = netaddr.IPNetwork(subnet + "/" + mask)
            except Exception as e:
                Log.error("ARP", e.message)
                return []

        mac_addresses = []
        if network:
            request_url = self.bigip.icr_url + "/net/arp"
            request_filter = "partition eq " + folder
            request_url += "?$filter=" + request_filter
            response = self.bigip.icr_session.get(
                request_url, timeout=const.CONNECTION_TIMEOUT)
            Log.debug("ARP::get response", "%s" % response.json())
            if response.status_code < 400:
                response_obj = json.loads(response.text)
                if "items" in response_obj:
                    for arp in response_obj["items"]:
                        # strip the '%<route domain>' suffix if present.
                        # The original sliced with find()'s -1 and so
                        # dropped the last character of addresses that
                        # carried no '%' suffix.
                        ip_str = arp["ipAddress"]
                        ad_rd_div = ip_str.find("%")
                        if ad_rd_div > -1:
                            ip_str = ip_str[0:ad_rd_div]
                        address = netaddr.IPAddress(ip_str)
                        if address in network:
                            mac_addresses.append(arp["macAddress"])
                            self.delete(arp["ipAddress"],
                                        folder=arp["partition"])
        return mac_addresses
Ejemplo n.º 14
0
 def get_arps(self, ip_address=None, folder='Common'):
     """ Get ARP static entry

     With *ip_address*, look up that single entry; otherwise list every
     entry in *folder*. Each result dict maps the route-domain-stripped
     entry name to its MAC address. Raises StaticARPQueryException on
     an HTTP error response.
     """
     folder = str(folder).replace('/', '')
     if ip_address:
         quoted = urllib.quote(
             self._remove_route_domain_zero(ip_address))
         request_url = (self.bigip.icr_url + '/net/arp/' +
                        '~' + folder + '~' + quoted)
     else:
         request_url = (self.bigip.icr_url + '/net/arp' +
                        '?$filter=' + 'partition eq ' + folder)
     response = self.bigip.icr_session.get(
         request_url, timeout=const.CONNECTION_TIMEOUT)
     Log.debug('ARP::get response',
               '%s' % response.json())
     if response.status_code >= 400:
         Log.error('ARP', response.text)
         raise exceptions.StaticARPQueryException(response.text)
     response_obj = json.loads(response.text)
     if ip_address:
         return [
             {strip_domain_address(response_obj['name']):
              response_obj['macAddress']}
         ]
     if 'items' in response_obj:
         return [{strip_domain_address(arp['name']): arp['macAddress']}
                 for arp in response_obj['items']]
     return []
Ejemplo n.º 15
0
 def get_arps(self, ip_address=None, folder='Common'):
     """ Get ARP static entry

     With *ip_address*, look up that single entry; otherwise list
     every entry in *folder*. Each result dict maps the raw entry name
     to its MAC address. Raises StaticARPQueryException on an HTTP
     error response.
     """
     folder = str(folder).replace('/', '')
     if ip_address:
         target = urllib.quote(self._remove_route_domain_zero(ip_address))
         request_url = (self.bigip.icr_url + '/net/arp/' +
                        '~' + folder + '~' + target)
         response = self.bigip.icr_session.get(
             request_url, timeout=const.CONNECTION_TIMEOUT)
         Log.debug('ARP::get response',
                   '%s' % response.json())
         if response.status_code < 400:
             entry = json.loads(response.text)
             return [{entry['name']: entry['macAddress']}]
         Log.error('ARP', response.text)
         raise exceptions.StaticARPQueryException(response.text)
     request_url = (self.bigip.icr_url + '/net/arp' +
                    '?$filter=' + 'partition eq ' + folder)
     response = self.bigip.icr_session.get(
         request_url, timeout=const.CONNECTION_TIMEOUT)
     Log.debug('ARP::get response',
               '%s' % response.json())
     if response.status_code < 400:
         listing = json.loads(response.text)
         if 'items' in listing:
             return [{arp['name']: arp['macAddress']}
                     for arp in listing['items']]
         return []
     Log.error('ARP', response.text)
     raise exceptions.StaticARPQueryException(response.text)
Ejemplo n.º 16
0
 def remove_from_pool(self, name=None, member_name=None, folder='Common'):
     """Remove a translation address from a SNAT pool.

     Returns True when the member was removed, or when the whole pool
     was deleted because the member was its last translation address;
     False when the member is not in the pool or removal failed.

     :param name: SNAT pool name
     :param member_name: translation address to remove
     :param folder: unused in this body -- TODO confirm whether it
         should scope the lookup
     """
     existing_members = self.lb_snatpool.get_member_v2([name])[0]
     if member_name in existing_members:
         string_seq = \
          self.lb_snatpool.typefactory.create('Common.StringSequence')
         string_seq_seq = \
     self.lb_snatpool.typefactory.create('Common.StringSequenceSequence')
         # NOTE(review): values is assigned the bare string here rather
         # than a one-element list as in sibling *_seq usages --
         # presumably the SOAP layer coerces it; confirm.
         string_seq.values = member_name
         string_seq_seq.values = [string_seq]
         try:
             self.lb_snatpool.remove_member_v2([name], string_seq_seq)
             return True
         except WebFault as wf:
             # A SNAT pool must keep at least one translation address;
             # removing the last member fails, so delete the pool itself.
             if "must reference at least one translation address" \
                                                        in str(wf.message):
                 Log.debug('SNAT',
                 'removing SNATPool because last member is being removed')
                 self.lb_snatpool.delete_snat_pool([name])
                 return True
             # NOTE(review): any other WebFault is swallowed and
             # reported as False -- confirm best-effort is intended.
     return False
Ejemplo n.º 17
0
 def purge_orphaned_folders(self, known_folders, bigip=None):
     """Purge folders carrying this prefix that are not in known_folders.

     :param known_folders: iterable of undecorated folder names to keep
     :param bigip: optional connection to use; defaults to self.bigip
     """
     if not bigip:
         bigip = self.bigip
     existing_folders = bigip.system.get_folders()
     # remove all folders which are default
     existing_folders.remove('/')
     existing_folders.remove('Common')
     # remove all folders which are not managed
     # with this object prefix.
     # iterate over a copy: removing from the list while iterating it
     # directly skips the element following each removal.
     for folder in list(existing_folders):
         if not folder.startswith(self.OBJ_PREFIX):
             existing_folders.remove(folder)
     for folder in known_folders:
         decorated_folder = bigip.decorate_folder(folder)
         if decorated_folder in existing_folders:
             existing_folders.remove(decorated_folder)
     # anything left should be purged; guard the log like the sibling
     # implementations so an empty purge stays silent
     if existing_folders:
         Log.debug('system',
                   'purging orphaned tenants: %s' % existing_folders)
     for folder in existing_folders:
         try:
             bigip.system.purge_folder(folder, bigip)
         except Exception as e:
             Log.error('purge_orphaned_folders', e.message)
Ejemplo n.º 18
0
 def create_multipoint_tunnel(self,
                              name=None,
                              profile_name=None,
                              self_ip_address=None,
                              greid=0,
                              description=None,
                              folder='Common',
                              route_domain_id=0):
     """ Create multipoint tunnel

     Builds an L2GRE multipoint tunnel unless one of that name already
     exists. Returns True when created, False when it already exists.
     Raises L2GRETunnelCreationException on an HTTP error.
     """
     if self.tunnel_exists(name=name, folder=folder):
         return False
     folder = str(folder).replace('/', '')
     payload = {
         'name': name,
         'partition': folder,
         'profile': profile_name,
         'key': greid,
         'localAddress': self_ip_address,
         'remoteAddress': '0.0.0.0',
     }
     if description:
         payload['description'] = description
     request_url = self.bigip.icr_url + '/net/tunnels/tunnel/'
     Log.debug('L2GRE', 'creating tunnel with %s' % json.dumps(payload))
     response = self.bigip.icr_session.post(
         request_url,
         data=json.dumps(payload),
         timeout=const.CONNECTION_TIMEOUT)
     if response.status_code >= 400:
         Log.error('L2GRE', response.text)
         raise exceptions.L2GRETunnelCreationException(response.text)
     # non-Common tunnels must be attached to their route domain
     if folder != 'Common':
         self.bigip.route.add_vlan_to_domain_by_id(
             name=name,
             folder=folder,
             route_domain_id=route_domain_id)
     return True
Ejemplo n.º 19
0
    def sync(self, name, force_now=False):
        """ Ensure local device in sync with group

        Polls get_sync_status() up to const.MAX_SYNC_ATTEMPTS times,
        pushing the local config to device service group *name*
        whenever the group is out of sync, and growing the delay
        between pushes by const.SYNC_DELAY on each retry.

        :param name: device service group name to synchronize to
        :param force_now: push the local config once before polling
        :raises exceptions.BigIPClusterSyncFailure: when the group
            reports 'Sync Failure', or does not reach 'Standalone' /
            'In Sync' within the attempt budget.
        """
        sync_start_time = time.time()
        dev_name = self.get_local_device_name()
        sleep_delay = const.SYNC_DELAY

        attempts = 0
        if force_now:
            # caller asked for an immediate push before any status check
            self.sync_local_device_to_group(name)
            time.sleep(sleep_delay)
            attempts += 1

        while attempts < const.MAX_SYNC_ATTEMPTS:
            state = self.get_sync_status()
            # terminal success states
            if state in ['Standalone', 'In Sync']:
                break

            elif state == 'Awaiting Initial Sync':
                attempts += 1
                Log.info(
                    'Cluster',
                    "Device %s - Synchronizing initial config to group %s"
                    % (dev_name, name))
                self.sync_local_device_to_group(name)
                time.sleep(sleep_delay)

            elif state in ['Disconnected',
                           'Not All Devices Synced',
                           'Changes Pending']:
                attempts += 1

                last_log_time = 0
                now = time.time()
                wait_start_time = now
                # Keep checking the sync state in a quick loop.
                # We want to detect In Sync as quickly as possible.
                while now - wait_start_time < sleep_delay:
                    # Only log once per second
                    if now - last_log_time >= 1:
                        Log.info(
                            'Cluster',
                            'Device %s, Group %s not synced. '
                            % (dev_name, name) +
                            'Waiting. State is: %s'
                            % state)
                        last_log_time = now
                    state = self.get_sync_status()
                    if state in ['Standalone', 'In Sync']:
                        break
                    time.sleep(.5)
                    now = time.time()
                else:
                    # if we didn't break out due to the group being in sync
                    # then attempt to force a sync.
                    self.sync_local_device_to_group(name)
                    sleep_delay += const.SYNC_DELAY
                    # no need to sleep here because we already spent the sleep
                    # interval checking status.
                    continue

                # Only a break from the inner while loop due to Standalone or
                # In Sync will reach here.
                # Normal exit of the while loop reach the else statement
                # above which continues the outer loop
                break

            elif state == 'Sync Failure':
                Log.info('Cluster',
                         "Device %s - Synchronization failed for %s"
                         % (dev_name, name))
                Log.debug('Cluster', 'SYNC SECONDS (Sync Failure): ' +
                          str(time.time() - sync_start_time))
                raise exceptions.BigIPClusterSyncFailure(
                    'Device service group %s' % name +
                    ' failed after ' +
                    '%s attempts.' % const.MAX_SYNC_ATTEMPTS +
                    ' Correct sync problem manually' +
                    ' according to sol13946 on ' +
                    ' support.f5.com.')
            else:
                # unrecognized state: push the config and retry with a
                # longer delay
                attempts += 1
                Log.info('Cluster',
                         "Device %s " % dev_name +
                         "Synchronizing config attempt %s to group %s:"
                         % (attempts, name) + " current state: %s" % state)
                self.sync_local_device_to_group(name)
                time.sleep(sleep_delay)
                sleep_delay += const.SYNC_DELAY
        else:
            # while/else: only reached when the attempt budget was
            # exhausted without a break (i.e. never reached a sync state)
            if state == 'Disconnected':
                Log.debug('Cluster',
                          'SYNC SECONDS(Disconnected): ' +
                          str(time.time() - sync_start_time))
                raise exceptions.BigIPClusterSyncFailure(
                    'Device service group %s' % name +
                    ' could not reach a sync state' +
                    ' because they can not communicate' +
                    ' over the sync network. Please' +
                    ' check connectivity.')
            else:
                Log.debug('Cluster', 'SYNC SECONDS(Timeout): ' +
                          str(time.time() - sync_start_time))
                raise exceptions.BigIPClusterSyncFailure(
                    'Device service group %s' % name +
                    ' could not reach a sync state after ' +
                    '%s attempts.' % const.MAX_SYNC_ATTEMPTS +
                    ' It is in %s state currently.' % state +
                    ' Correct sync problem manually' +
                    ' according to sol13946 on ' +
                    ' support.f5.com.')

        Log.debug('Cluster', 'SYNC SECONDS(Success): ' +
                  str(time.time() - sync_start_time))
Ejemplo n.º 20
0
    def purge_orphaned_pools(self, known_pools, delete_virtual_server=True):
        """ Purge pools on the BIG-IP that this plugin no longer knows about.

        Queries every pool on the device, keeps only those carrying this
        plugin's object prefix, excludes the ones listed in known_pools,
        and deletes the remainder.

        :param known_pools: iterable of undecorated pool names that are
            still in use and must not be removed.
        :param delete_virtual_server: when True (the default), also delete
            any virtual server found referencing an orphaned pool, along
            with its persist profiles, rules, and other profiles.
        :raises exceptions.PoolQueryException: when the pool query returns
            an error status other than 404.
        """
        request_url = self.bigip.icr_url + '/ltm/pool'
        request_url += '?$select=name,partition'

        response = self.bigip.icr_session.get(
            request_url, timeout=const.CONNECTION_TIMEOUT)
        # map of pool name -> partition (folder) for every pool on the device
        existing_pools = {}
        if response.status_code < 400:
            return_obj = json.loads(response.text)
            if 'items' in return_obj:
                for pool in return_obj['items']:
                    existing_pools[pool['name']] = pool['partition']
        elif response.status_code != 404:
            # 404 just means no pools exist; anything else is a real error
            Log.error('pool', response.text)
            raise exceptions.PoolQueryException(response.text)

        Log.debug('pool', 'purging pools - existing : %s, known : %s'
                  % (existing_pools.keys(), known_pools))

        # we start with all pools and remove the ones that are
        # completely unrelated to the plugin or are OK to be there.
        cleanup_list = dict(existing_pools)

        # remove all pools which are not managed by this plugin
        for pool in existing_pools:
            if not pool.startswith(self.OBJ_PREFIX):
                del cleanup_list[pool]

        # exclude pools the plugin still knows about
        for pool in known_pools:
            decorated_pool = self.OBJ_PREFIX + pool
            Log.debug('pool', 'excluding %s from %s' %
                      (str(decorated_pool), str(cleanup_list)))
            if decorated_pool in cleanup_list:
                del cleanup_list[decorated_pool]

        # anything left should be purged
        for pool in cleanup_list:
            Log.debug('purge_orphaned_pools',
                      "Purging pool %s in folder %s" %
                      (pool, cleanup_list[pool]))
            vs_name = \
                self.bigip.virtual_server.get_virtual_servers_by_pool_name(
                    pool_name=pool, folder=cleanup_list[pool])
            # FIX: honor delete_virtual_server; previously the flag was
            # ignored and any referencing virtual server was always deleted.
            if vs_name and delete_virtual_server:
                try:
                    self.bigip.virtual_server.delete(
                        name=vs_name, folder=cleanup_list[pool])
                    self.bigip.virtual_server.delete_persist_profile_like(
                        match=vs_name, folder=cleanup_list[pool])
                    self.bigip.rule.delete_like(
                        match=vs_name, folder=cleanup_list[pool])
                    self.bigip.virtual_server.delete_profile_like(
                        match=vs_name, folder=cleanup_list[pool])
                except Exception as e:
                    # best effort: log and continue so the pool delete
                    # below is still attempted
                    Log.error('purge_orphaned_pools', e.message)
            try:
                Log.debug('purge_orphaned_pools',
                          "Deleting pool %s in folder %s" %
                          (pool, cleanup_list[pool]))
                self.delete(name=pool, folder=cleanup_list[pool])
            except Exception as e:
                Log.error('purge_orphaned_pools', e.message)
# Ejemplo n.º 21 (example 21, score: 0)
    def purge_orphaned_pools(self, known_pools, delete_virtual_server=True):
        """ Purge pools on the BIG-IP that this plugin no longer knows about.

        Queries every pool on the device, keeps only those carrying this
        plugin's object prefix, excludes the ones listed in known_pools
        (including their iapp '_pool' variants), and deletes the remainder.

        :param known_pools: iterable of undecorated pool names that are
            still in use and must not be removed.
        :param delete_virtual_server: when True (the default), also delete
            any virtual server found referencing an orphaned pool, along
            with its persist profiles, rules, and other profiles.
        :raises exceptions.PoolQueryException: when the pool query returns
            an error status other than 404.
        """
        request_url = self.bigip.icr_url + '/ltm/pool'
        request_url += '?$select=name,partition'

        response = self.bigip.icr_session.get(request_url,
                                              timeout=const.CONNECTION_TIMEOUT)
        # map of pool name -> partition (folder) for every pool on the device
        existing_pools = {}
        if response.status_code < 400:
            return_obj = json.loads(response.text)
            if 'items' in return_obj:
                for pool in return_obj['items']:
                    existing_pools[pool['name']] = pool['partition']
        elif response.status_code != 404:
            # 404 just means no pools exist; anything else is a real error
            Log.error('pool', response.text)
            raise exceptions.PoolQueryException(response.text)

        Log.debug(
            'pool', 'purging pools - existing : %s, known : %s' %
            (existing_pools.keys(), known_pools))

        # we start with all pools and remove the ones that are
        # completely unrelated to the plugin or are OK to be there.
        cleanup_list = dict(existing_pools)

        # remove all pools which are not managed by this plugin
        for pool in existing_pools:
            if not pool.startswith(self.OBJ_PREFIX):
                del cleanup_list[pool]

        # exclude pools the plugin still knows about
        for pool in known_pools:
            decorated_pool = self.OBJ_PREFIX + pool
            Log.debug(
                'pool', 'excluding %s from %s' %
                (str(decorated_pool), str(cleanup_list)))
            if decorated_pool in cleanup_list:
                del cleanup_list[decorated_pool]
            # Exclude known iapp pool
            decorated_pool += '_pool'
            Log.debug(
                'pool', 'excluding %s from %s' %
                (str(decorated_pool), str(cleanup_list)))
            if decorated_pool in cleanup_list:
                del cleanup_list[decorated_pool]

        # anything left should be purged
        for pool in cleanup_list:
            Log.debug(
                'purge_orphaned_pools',
                "Purging pool %s in folder %s" % (pool, cleanup_list[pool]))
            vs_name = \
                self.bigip.virtual_server.get_virtual_servers_by_pool_name(
                    pool_name=pool, folder=cleanup_list[pool])
            # FIX: honor delete_virtual_server; previously the flag was
            # ignored and any referencing virtual server was always deleted.
            if vs_name and delete_virtual_server:
                try:
                    self.bigip.virtual_server.delete(name=vs_name,
                                                     folder=cleanup_list[pool])
                    self.bigip.virtual_server.delete_persist_profile_like(
                        match=vs_name, folder=cleanup_list[pool])
                    self.bigip.rule.delete_like(match=vs_name,
                                                folder=cleanup_list[pool])
                    self.bigip.virtual_server.delete_profile_like(
                        match=vs_name, folder=cleanup_list[pool])
                except Exception as e:
                    # best effort: log and continue so the pool delete
                    # below is still attempted
                    Log.error('purge_orphaned_pools', e.message)
            try:
                Log.debug(
                    'purge_orphaned_pools', "Deleting pool %s in folder %s" %
                    (pool, cleanup_list[pool]))
                self.delete(name=pool, folder=cleanup_list[pool])
            except Exception as e:
                Log.error('purge_orphaned_pools', e.message)
# Ejemplo n.º 22 (example 22, score: 0)
    def sync(self, name, force_now=False):
        """ Drive a config-sync of the local device to device group *name*.

        Polls the sync status up to const.MAX_SYNC_ATTEMPTS times,
        re-issuing a sync to the group on each unsuccessful attempt and
        growing the per-attempt sleep by const.SYNC_DELAY each time.

        :param name: device service group name to synchronize to.
        :param force_now: when True, issue one sync immediately before
            entering the polling loop (this consumes one attempt).
        :raises BigIPClusterSyncFailure: on a 'Sync Failure' state, on a
            persistent 'Disconnected' state, or when the attempt limit
            is exhausted without reaching 'In Sync' / 'Standalone'.
        """
        sync_start_time = time.time()
        self.bigip.system.set_folder('/Common')
        dev_name = self.mgmt_dev.get_local_device()
        sleep_delay = const.SYNC_DELAY

        attempts = 0
        if force_now:
            self.sys_sync.synchronize_to_group_v2(name, dev_name, True)
            time.sleep(sleep_delay)
            attempts += 1

        # NOTE(review): if force_now consumes the only allowed attempt
        # (i.e. MAX_SYNC_ATTEMPTS == 1), the loop body never runs and the
        # while-else below reads 'state' before assignment -- confirm
        # MAX_SYNC_ATTEMPTS is always > 1.
        while attempts < const.MAX_SYNC_ATTEMPTS:
            state = self.get_sync_status()
            # already synced (or not clustered at all): success
            if state in ['Standalone',
                         'In Sync',
                        ]:
                break

            elif state == 'Awaiting Initial Sync':
                attempts += 1
                Log.info('Cluster',
                    "Device %s - Synchronizing initial config to group %s"
                    % (dev_name, name))
                self.sys_sync.synchronize_to_group_v2(name, dev_name, True)
                time.sleep(sleep_delay)

            # transient states: poll quickly for the duration of one
            # sleep_delay window before forcing another sync
            elif state in ['Disconnected',
                            'Not All Devices Synced',
                            'Changes Pending',
                           ]:
                attempts += 1

                last_log_time = 0
                now = time.time()
                wait_start_time = now
                # Keep checking the sync state in a quick loop.
                # We want to detect In Sync as quickly as possible.
                while now - wait_start_time < sleep_delay:
                    # Only log once per second
                    if now - last_log_time >= 1:
                        Log.info('Cluster',
                            'Device %s, Group %s not synced. '
                            % (dev_name, name) + \
                            'Waiting. State is: %s'
                            % state)
                        last_log_time = now
                    state = self.get_sync_status()
                    if state in ['Standalone',
                                 'In Sync',
                                ]:
                        break
                    time.sleep(.5)
                    now = time.time()
                else:
                    # if we didn't break out due to the group being in sync
                    # then attempt to force a sync.
                    self.sys_sync.synchronize_to_group_v2(name, dev_name, True)
                    sleep_delay += const.SYNC_DELAY
                    # no need to sleep here because we already spent the sleep
                    # interval checking status.
                    continue

                # Only a break from the inner while loop due to Standalone or
                # In Sync will reach here.
                # Normal exit of the while loop reach the else statement
                # above which continues the outer loop
                break

            # hard failure reported by the device: give up immediately
            elif state == 'Sync Failure':
                Log.info('Cluster',
                "Device %s - Synchronization failed for %s"
                % (dev_name, name))
                Log.debug('Cluster', 'SYNC SECONDS (Sync Failure): ' + \
                            str(time.time() - sync_start_time))
                raise BigIPClusterSyncFailure(
                   'Device service group %s' % name + \
                   ' failed after ' + \
                   '%s attempts.' % const.MAX_SYNC_ATTEMPTS + \
                   ' Correct sync problem manually' + \
                   ' according to sol13946 on ' + \
                   ' support.f5.com.')
            # any other state: retry with an increased backoff
            else:
                attempts += 1
                Log.info('Cluster',
                "Device %s " % dev_name \
                + "Synchronizing config attempt %s to group %s:"
                % (attempts, name) \
                + " current state: %s" % state)
                self.sys_sync.synchronize_to_group_v2(name, dev_name, True)
                time.sleep(sleep_delay)
                sleep_delay += const.SYNC_DELAY
        # while-else: reached only when the attempt limit was exhausted
        # without breaking out of the loop
        else:
            if state == 'Disconnected':
                Log.debug('Cluster',
                          'SYNC SECONDS(Disconnected): ' + \
                              str(time.time() - sync_start_time))
                raise BigIPClusterSyncFailure(
                        'Device service group %s' % name + \
                        ' could not reach a sync state' + \
                        ' because they can not communicate' + \
                        ' over the sync network. Please' + \
                        ' check connectivity.')
            else:
                Log.debug('Cluster', 'SYNC SECONDS(Timeout): ' + \
                              str(time.time() - sync_start_time))
                raise BigIPClusterSyncFailure(
                    'Device service group %s' % name + \
                    ' could not reach a sync state after ' + \
                    '%s attempts.' % const.MAX_SYNC_ATTEMPTS + \
                    ' It is in %s state currently.' % state + \
                    ' Correct sync problem manually' + \
                    ' according to sol13946 on ' + \
                    ' support.f5.com.')

        # success path: broke out of the loop in In Sync / Standalone
        Log.debug('Cluster', 'SYNC SECONDS(Success): ' + \
                      str(time.time() - sync_start_time))