Пример #1
0
    def reset_all(self):
        """Wipe HA storage on the default BIG-IQ and all peers, then
        restore discovery state and apply known-BZ workarounds.

        Steps:
          1. Wipe HA storage over SSH on every device.
          2. Wait for restjavad to come back on all devices.
          3. For IPv6 runs where localhost gets reset to IPv4, push the
             expected discovery self-IP back and wait for the resolver.
          4. For BIG-IQ versions in (4.3.0, 4.5.0): clean device-group
             certs, run the bz_help1 workaround, and wait for HA peers.
        """
        group = RCMD.device.DEFAULT_ALLBIGIQS_GROUP
        for device in [self.default] + self.peers:
            with SSHInterface(device=device) as sshifc:
                LOG.info('Wiping storage on {0}'.format(device))
                SCMD.ssh.generic(SCMD.bigiq.ha.HA_WIPE_COMMAND, ifc=sshifc)

        # NOTE(review): 'device' is the last device from the loop above;
        # presumably any one device's REST interface suffices to poll
        # restjavad across the whole set -- confirm.
        with EmapiInterface(device=device, auth=AUTH.BASIC) as rstifc:
            RCMD.system.wait_restjavad([self.default] + self.peers, ifc=rstifc)

        # For IPv6 runs where localhost will get reset to IPv4.
        for device in [self.default] + self.peers:
            with EmapiInterface(device=device, auth=AUTH.BASIC) as rstifc:
                resp = rstifc.api.get(DeviceResolver.DEVICES_URI % group)
                selfip_expect = device.get_discover_address()
                selfips_actual = [x.address for x in resp['items']]
                if selfip_expect not in selfips_actual:
                    LOG.info(
                        "selfip mismatch. Setting {0}".format(selfip_expect))
                    self_addr = IPAddress(selfip_expect)
                    payload = NetworkDiscover()
                    payload.discoveryAddress = self_addr.format(ipv6_full)
                    rstifc.api.put(NetworkDiscover.URI, payload=payload)
                    DeviceResolver.wait(rstifc.api, group)

        # For BZ workarounds..

        bigips = []
        context = ContextHelper()
        default_bigiq = context.get_icontrol(device=self.default).version
        session = context.get_config().get_session().name

        for device in context.get_config().get_devices():
            v = context.get_icontrol(device=device).version
            if v.product.is_bigip and v >= 'bigip 11.3.0':
                bigips.append(device)

        # The two workaround groups were previously guarded by two separate
        # but identical version-range checks; merged into one branch since
        # 'default_bigiq' cannot change between them.
        if default_bigiq > 'bigiq 4.3.0' and default_bigiq < 'bigiq 4.5.0':
            with EmapiInterface(device=self.default,
                                auth=AUTH.BASIC) as rstifc:
                RCMD.device.clean_dg_certs(bigips, ifc=rstifc)

            with EmapiInterface(device=self.default,
                                auth=AUTH.BASIC) as rstifc:
                RCMD.system.bz_help1([self.default] + self.peers, ifc=rstifc)

            with SSHInterface(device=self.default) as sshifc:
                SCMD.bigiq.ha.wait_ha_peer(self.peers,
                                           session=session,
                                           ifc=sshifc)
Пример #2
0
class CreateAdcNodeObjects(IcontrolRestCommand):  # @IgnorePep8
    """ Create the specified number of ADC Node objects on the BIG-IQ for
        the specified BIG-IP.  Works for BIG-IQ 4.6.0 and later.

        Deploying the ADC objects from the BIG-IQ to the BIG-IP(s) is a
        separate call.
    """
    def __init__(self, node_count, bigip, *args, **kwargs):
        """ Object initialization.

            @param node_count: The number of ADC nodes to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
        """
        super(CreateAdcNodeObjects, self).__init__(*args, **kwargs)
        self.node_count = node_count
        self.bigip = bigip
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.ip_gen = ipv4_address_generator()

    def run(self):
        """ Generate the requested ADC Node objects for the given BIG-IP
            in the default BIG-IQ.

            @return: Tuple of (node names, node addresses, node selfLinks)
            for the generated nodes.  These are needed when generating
            Pools and Virtual Servers.
        """
        LOG.info("Creating {0} node(s) in the BigIQ working config...".format(
            self.node_count))
        names = []
        addresses = []
        selflinks = []

        # The device reference is the same for every node; build it once.
        device_ref = ('https://localhost' + DeviceResolver.DEVICE_URI %
                      (DEFAULT_ADC_GROUP, self.bigip['machineId']))

        for _ in range(self.node_count):
            device_num = NEXT_NUMBER.get_next_number()
            self.object_counter = NEXT_NUMBER.get_next_number()
            node_name = 'ScaleNode-%s-device%d-obj%d' %\
                        (self.cfgifc.get_session().name, device_num,
                         self.object_counter)
            node_address = next(self.ip_gen)

            payload = WorkingLtmNode(name=node_name,
                                     address=node_address,
                                     partition=PARTITION)
            payload.deviceReference.set(device_ref)
            resp = self.api.post(WorkingLtmNode.URI, payload)

            names.append(node_name)
            addresses.append(node_address)
            selflinks.append(resp.selfLink)

        return names, addresses, selflinks
Пример #3
0
class AdcVipObjectsCreate(IcontrolRestCommand):  # @IgnorePep8
    """ Create the specified number of ADC VIP objects on the BIG-IQ for
        the specified BIG-IP.  Works for BIG-IQ 4.6.0 and later.

        Deploying the ADC objects from the BIG-IQ to the BIG-IP(s) is a
        separate call.
    """
    def __init__(self, vip_count, bigip, pool_names, pool_selflinks, *args,
                 **kwargs):
        """ Object initialization.

            @param vip_count: The number of ADC VIPs to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param pool_names: Names of the Pools previously created.
            @param pool_selflinks: Pool selfLinks used to link each VIP to
            its pool.
        """
        super(AdcVipObjectsCreate, self).__init__(*args, **kwargs)
        self.vip_count = vip_count
        self.bigip = bigip
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.pool_names = pool_names
        self.pool_selflinks = pool_selflinks
        self.ip_gen = ipv4_address_generator()

    def setup(self):
        """ Generate the requested ADC VIP objects for the given BIG-IP in
            the default BIG-IQ, one VIP per previously-created pool.
        """
        LOG.info("Creating {0} VIP(s) in the BigIQ working config...".format(
            self.vip_count))
        device_num = NEXT_NUMBER.get_next_number()

        # The device reference is the same for every VIP; build it once.
        device_ref = ('https://localhost' + DeviceResolver.DEVICE_URI %
                      (DEFAULT_ADC_GROUP, self.bigip['machineId']))

        for idx in range(self.vip_count):
            self.object_counter = NEXT_NUMBER.get_next_number()
            vip_name = 'ScaleVip-%s-device%d-obj%d' %\
                       (self.cfgifc.get_session().name, device_num,
                        self.object_counter)
            vip_address = next(self.ip_gen)

            payload = WorkingLtmVip(name=vip_name,
                                    destination=vip_address + ':80',
                                    fullPath='/' + PARTITION + '/' + vip_name,
                                    partition=PARTITION,
                                    pool='/' + PARTITION + '/' +
                                    self.pool_names[idx])
            payload.deviceReference.set(device_ref)
            payload.poolReference.set(self.pool_selflinks[idx])
            self.api.post(WorkingLtmVip.URI, payload)
Пример #4
0
 def __init__(self, handle, type=None):
     """Initialize config-driven lookup state.

     @param handle: currently ignored; self.handle is forced to 0 (the
         original assignment is left commented out below).
     @param type: stored verbatim on the instance (shadows the builtin
         'type' -- kept for interface compatibility).
     """
     #self.handle = handle
     self.handle = 0
     self.type = type
     context = ContextHelper()
     self.cfgifc = context.get_config()
     # NOTE(review): 'rr' is never used after this call; presumably kept
     # for its side effects or left over -- confirm before removing.
     rr = self.cfgifc.get_ranges()
     self.pools = self.cfgifc.get_respools()
     self.value_to_item = {}
     # Map each BIG-IP device's management IP under a variable key like
     # 'f5ite/bigip.1/mgmt/ip', derived from the device alias.
     # NOTE(review): self._handle_vars is not created in this method; it
     # must be defined elsewhere in the class -- confirm.
     for device in self.cfgifc.get_devices(KIND_TMOS_BIGIP):
         #f5ite/bigip.1/mgmt/ip
         self._handle_vars['f5ite/{}.{}/mgmt/ip'.format(
             *device.alias.split('-'))] = device.address
Пример #5
0
class GenerateObjectName(Macro):
    # Class-wide counter shared by every instance so generated names are
    # unique across the process.
    object_counter = 0

    def __init__(self, obj_type='Object', parent_id='name', *args, **kwargs):
        """
        Arguments are optional, but help make the generated name describe
        the object at a glance.
        @param obj_type: Type of the object being named, e.g. 'Pool'.
        @param parent_id: Unique identifier of the parent object, e.g.
        machineId, IP address, etc.
        @return: generated name
        """
        super(GenerateObjectName, self).__init__(*args, **kwargs)
        self.obj_type = obj_type
        self.parent_id = parent_id
        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session().name

    @classmethod
    def increment_counter(cls):
        """Bump the shared class counter by one."""
        cls.object_counter += 1

    def run(self):
        """Return the next generated object name."""
        self.increment_counter()
        parts = (self.obj_type, self.session, self.parent_id,
                 self.object_counter)
        return '{0}-{1}-{2}-obj{3}'.format(*parts)
Пример #6
0
class CoreCollector(Thread):
    """Background thread that downloads core files (and, depending on
    `mode`, a qkview) from a single device over SSH into the test
    session's output directory.
    """
    def __init__(self, sshifc, data, mode, root=None):
        """
        @param sshifc: SSH interface to the device; entered as a context
            manager in run().
        @param data: shared result holder; run() populates data.cores.
        @param mode: qkview collection policy (QKVIEW.ALWAYS or
            QKVIEW.ON_FAIL).
        @param root: optional destination directory; relative values are
            resolved against the session directory.
        """
        super(CoreCollector,
              self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()

    def _get_session_dir(self):
        """Return the session output path, creating it (with permissive
        mode via umask(0)) if it does not exist yet."""
        path = self.session.path

        if path and not os.path.exists(path):
            oldumask = os.umask(0)
            os.makedirs(path)
            os.umask(oldumask)

        return path

    def _get_or_create_dirs(self, name, root=None):
        """Return (path, created) for <root>/<name>, creating the
        directory tree if needed; ':' is replaced with '@' in the path."""
        if root is None:
            root = self._get_session_dir()
        else:
            if not os.path.isdir(root):
                # Treat a non-existent root as relative to the session dir.
                root = os.path.join(self._get_session_dir(), root)

        created = False
        path = os.path.join(root, name)

        # Windows-based NAS doesn't support :'s in names
        path = path.replace(':', '@')
        if not os.path.exists(path):
            oldumask = os.umask(0)
            os.makedirs(path)
            os.umask(oldumask)
            created = True

        return path, created

    def run(self):
        """Collect /var/core/* from the device (removing them remotely),
        then optionally generate and download a qkview per `mode`."""
        LOG.info('Looking for cores...')
        d = self.data.cores
        d.data = {}
        d.checked = time.time()

        with self.sshifc as sshifc:
            if SCMD.ssh.cores_exist(ifc=sshifc):
                LOG.info('Cores found!')
                cores_dir, _ = self._get_or_create_dirs(
                    "%s/%s" % (CORES_DIR, sshifc.address), self.root)

                SCMD.ssh.scp_get(ifc=sshifc,
                                 source='/var/core/*',
                                 destination=cores_dir)
                # Delete collected cores so later runs don't re-report them.
                sshifc.api.run('rm -f /var/core/*')

                # Add read permissions to group and others.
                with ShellInterface(shell=True) as shell:
                    shell.api.run('chmod -R go+r %s' % cores_dir)
                if sshifc.device:
                    d.data[sshifc.device.get_alias()] = True

            if self.mode == QKVIEW.ALWAYS or \
               (self.mode == QKVIEW.ON_FAIL and self.data.test_result and
                    not self.data.test_result.wasSuccessful()):
                try:
                    LOG.info("Generating qkview...")
                    ret = SCMD.ssh.generic('qkview', ifc=sshifc)
                    # qkview prints the output file path on stderr; grab
                    # the /var/... line.
                    name = re.search('^/var/.+$', ret.stderr,
                                     flags=re.M).group(0)

                    LOG.info("Downloading qkview...")
                    qk_dir, _ = self._get_or_create_dirs(
                        "%s/%s" % (QKVIEWS_DIR, sshifc.address), self.root)

                    SCMD.ssh.scp_get(ifc=sshifc,
                                     source=name,
                                     destination=qk_dir)
                    if sshifc.api.exists(SCF_FILENAME):
                        SCMD.ssh.scp_get(ifc=sshifc,
                                         source=SCF_FILENAME,
                                         destination=qk_dir)

                except SSHTimeoutError:
                    LOG.warning('Could not complete qkview on %s',
                                sshifc.address)
Пример #7
0
class CoreCollector(Thread):
    """Background thread that downloads core files (and, depending on
    `mode`, a qkview) from a single device over SSH into the test
    session's output directory.
    """

    def __init__(self, sshifc, data, mode, root=None):
        """
        @param sshifc: SSH interface to the device; entered as a context
            manager in run().
        @param data: shared result holder; run() populates data.cores.
        @param mode: qkview collection policy (QKVIEW.ALWAYS or
            QKVIEW.ON_FAIL).
        @param root: optional destination directory; relative values are
            resolved against the session directory.
        """
        super(CoreCollector, self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()

    def _get_session_dir(self):
        """Return the session output path, creating it (with permissive
        mode via umask(0)) if it does not exist yet."""
        path = self.session.path

        if path and not os.path.exists(path):
            oldumask = os.umask(0)
            os.makedirs(path)
            os.umask(oldumask)

        return path

    def _get_or_create_dirs(self, name, root=None):
        """Return (path, created) for <root>/<name>, creating the
        directory tree if needed; ':' is replaced with '@' in the path."""
        if root is None:
            root = self._get_session_dir()
        else:
            if not os.path.isdir(root):
                # Treat a non-existent root as relative to the session dir.
                root = os.path.join(self._get_session_dir(), root)

        created = False
        path = os.path.join(root, name)

        # Windows-based NAS doesn't support :'s in names
        path = path.replace(':', '@')
        if not os.path.exists(path):
            oldumask = os.umask(0)
            os.makedirs(path)
            os.umask(oldumask)
            created = True

        return path, created

    def run(self):
        """Collect /var/core/* from the device (removing them remotely),
        then optionally generate and download a qkview per `mode`."""
        LOG.info('Looking for cores...')
        d = self.data.cores
        d.data = {}
        d.checked = time.time()

        with self.sshifc as sshifc:
            if SCMD.ssh.cores_exist(ifc=sshifc):
                LOG.info('Cores found!')
                cores_dir, _ = self._get_or_create_dirs("%s/%s" % (CORES_DIR, sshifc.address),
                                                        self.root)

                SCMD.ssh.scp_get(ifc=sshifc, source='/var/core/*',
                                 destination=cores_dir)
                # Delete collected cores so later runs don't re-report them.
                sshifc.api.run('rm -f /var/core/*')

                # Add read permissions to group and others.
                with ShellInterface(shell=True) as shell:
                    shell.api.run('chmod -R go+r %s' % cores_dir)
                if sshifc.device:
                    d.data[sshifc.device.get_alias()] = True

            if self.mode == QKVIEW.ALWAYS or \
               (self.mode == QKVIEW.ON_FAIL and self.data.test_result and
                    not self.data.test_result.wasSuccessful()):
                try:
                    LOG.info("Generating qkview...")
                    ret = SCMD.ssh.generic('qkview', ifc=sshifc)
                    # qkview prints the output file path on stderr; grab
                    # the /var/... line.
                    name = re.search('^/var/.+$', ret.stderr, flags=re.M).group(0)

                    LOG.info("Downloading qkview...")
                    qk_dir, _ = self._get_or_create_dirs("%s/%s" % (QKVIEWS_DIR, sshifc.address),
                                                         self.root)

                    SCMD.ssh.scp_get(ifc=sshifc, source=name, destination=qk_dir)
                    if sshifc.api.exists(SCF_FILENAME):
                        SCMD.ssh.scp_get(ifc=sshifc, source=SCF_FILENAME,
                                         destination=qk_dir)

                except SSHTimeoutError:
                    LOG.warning('Could not complete qkview on %s', sshifc.address)
Пример #8
0
class CreateAdcPoolObjects(IcontrolRestCommand):  # @IgnorePep8
    """ Create the specified number of ADC Pool objects on the BIG-IQ for
        the specified BIG-IP.  Works for BIG-IQ 4.6.0 and later.

        Deploying the ADC objects from the BIG-IQ to the BIG-IP(s) is a
        separate call.
    """
    def __init__(self, pool_count, pool_member_count, bigip, node_names,
                 node_addresses, node_selflinks, *args, **kwargs):
        """ Object initialization.

            @param pool_count: The number of ADC pools to create.
            @param pool_member_count: The number of members per ADC pool.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param node_names: Node names used to link up the pool members.
            @param node_addresses: Node IP addresses used to link up the
            pool members.
            @param node_selflinks: Node selfLinks used to link up the pool
            members.
        """
        super(CreateAdcPoolObjects, self).__init__(*args, **kwargs)
        self.pool_count = pool_count
        self.pool_member_count = pool_member_count
        self.bigip = bigip
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.node_names = node_names
        self.node_addresses = node_addresses
        self.node_selflinks = node_selflinks
        self.ip_gen = ipv4_address_generator()

    def run(self):
        """ Generate the requested ADC Pool objects (with members) for the
            given BIG-IP in the default BIG-IQ.

            @returns: Tuple of (pool names, pool selfLinks) for the pools
            that were generated.
        """
        LOG.info("Creating {0} pool(s) in the BigIQ working config...".format(
            self.pool_count))
        names = []
        selflinks = []
        device_num = NEXT_NUMBER.get_next_number()

        # The device reference is the same for every pool; build it once.
        device_ref = ('https://localhost' + DeviceResolver.DEVICE_URI %
                      (DEFAULT_ADC_GROUP, self.bigip['machineId']))

        for idx in range(self.pool_count):
            self.object_counter = NEXT_NUMBER.get_next_number()
            pool_name = 'ScalePool-%s-device%d-obj%d' %\
                        (self.cfgifc.get_session().name, device_num,
                         self.object_counter)
            names.append(pool_name)

            pool_payload = WorkingLtmPool(name=pool_name,
                                          fullPath='/' + PARTITION + '/' +
                                          pool_name,
                                          partition=PARTITION,
                                          loadBalancingMode='round-robin')
            pool_payload.deviceReference.set(device_ref)
            pool_resp = self.api.post(WorkingLtmPool.URI, pool_payload)
            selflinks.append(pool_resp.selfLink)

            # The port doubles as the member-uniqueness suffix; this needs
            # reworking only if pools ever exceed ~65k members.
            for port in range(1, self.pool_member_count + 1):
                self.object_counter = NEXT_NUMBER.get_next_number()
                member_name = '{0}:{1}'.format(self.node_names[idx], port)
                member = WorkingLtmPoolMember(name=member_name,
                                              address=self.node_addresses[idx],
                                              fullPath='/' + PARTITION + '/' +
                                              member_name,
                                              partition=PARTITION)
                member.nodeReference.set(self.node_selflinks[idx])
                self.api.post(WorkingLtmPoolMember.URI % pool_resp.id,
                              member)

        return names, selflinks