Пример #1
0
    def __init__(self, pool_count, pool_member_count, bigip, node_names,
                 node_addresses, node_selflinks, *args, **kwargs):
        """ Object initialization.

            @param pool_count: The number of ADC pools to create.
            @param pool_member_count: The number of members per ADC pool to
            create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param node_names: List of Node Names to use to link up with
            the pool members.
            @param node_addresses: List of Node IP Addresses to use to link
            up with the pool members.
            @param node_selflinks: List of Node Selflinks to use to link up
            with the pool members.
        """
        super(CreateAdcPoolObjects, self).__init__(*args, **kwargs)
        self.pool_count = pool_count
        self.pool_member_count = pool_member_count
        self.bigip = bigip
        # Sequence number used when building unique object names.
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.node_names = node_names
        self.node_addresses = node_addresses
        self.node_selflinks = node_selflinks
        # Generator of sequential IPv4 addresses for created objects.
        self.ip_gen = ipv4_address_generator()
Пример #2
0
    def __init__(self, sshifc, data, mode, root=None):
        """Thread that collects cores (and qkviews) from one device.

        @param sshifc: Open SSH interface to the target device.
        @param data: Shared container where collection results are recorded.
        @param mode: qkview collection mode.
          NOTE(review): appears to be one of the QKVIEW.* constants — confirm.
        @param root: Optional root directory overriding the session directory.
        """
        super(CoreCollector,
              self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()
Пример #3
0
    def run(self):
        """Collect scale-test statistics and logs from the device over SSH.

        Downloads selected log files, the REST diagnostics blob, assorted
        stat-command outputs and an lsof listing of the restjavad process
        into per-device directories under the session path.  An SSH timeout
        aborts the collection with a warning rather than failing the run.
        """
        LOG.info('Getting Stats for Scale...')
        with SSHInterface(device=self.device, timeout=TIMEOUT) as sshifc:
            try:
                # Create directory for stats
                device_dir = self.device.get_address() + "-" + self.device.alias

                stat_dir = os.path.join(self.session.path, DIRECTORY,
                                        device_dir)
                stat_dir = os.path.expanduser(stat_dir)
                stat_dir = os.path.expandvars(stat_dir)
                if not os.path.exists(stat_dir):
                    os.makedirs(stat_dir)

                # Create directory for logs.
                log_dir = os.path.join(self.session.path, DIRECTORY,
                                       device_dir, 'log')
                log_dir = os.path.expanduser(log_dir)
                log_dir = os.path.expandvars(log_dir)
                if not os.path.exists(log_dir):
                    os.makedirs(log_dir)

                # Collect specific files
                for log in LOGS:
                    # Only scp the pattern when it matches at least one file.
                    ret = sshifc.api.run('ls -1 %s | wc -l' % log)
                    if not ret.status and int(ret.stdout):
                        SCMD.ssh.scp_get(ifc=sshifc, source=log,
                                         destination=log_dir)

                # Save the REST diagnostics output as pretty-printed JSON.
                context = ContextHelper(__name__)
                r = context.get_icontrol_rest(device=self.device).api
                output = r.get('/mgmt/shared/diagnostics')
                with open(os.path.join(stat_dir, 'diagnostics'), 'wt') as f:
                    json.dump(output, f, indent=4)

                if SCMD.ssh.file_exists(ifc=sshifc, filename=FILE):
                    SCMD.ssh.scp_get(ifc=sshifc, source=FILE,
                                     destination=stat_dir)

                # Collect stats
                for stat in STATS:
                    # Output file is named after the command's first word.
                    output = SCMD.ssh.generic(stat, ifc=sshifc)
                    with open(os.path.join(stat_dir, stat.split()[0]), 'wt') as f:
                        f.write(output.stdout)

                # Dump the open files of the restjavad process.
                java_pid = SCMD.ssh.generic("cat /service/restjavad/supervise/pid",
                                            ifc=sshifc).stdout
                output = SCMD.ssh.generic('lsof -p %s' % java_pid, ifc=sshifc)
                with open(os.path.join(stat_dir, 'lsof'), 'wt') as f:
                    f.write(output.stdout)

            except SSHTimeoutError:
                LOG.warning('Could not complete collecting log and stats on %s',
                            self.device)
Пример #4
0
 def __init__(self, obj_type='Object', parent_id='name', *args, **kwargs):
     """
     Arguments are not necessary, but can help communicate information about the object at a glance.
     @param obj_type: Type of the object for which the name is being generated. e.g. 'Pool'
     @param parent_id: Unique identifier of the parent object, e.g. machineId, IP address, etc.
     @return: generated name
     """
     super(GenerateObjectName, self).__init__(*args, **kwargs)
     self.obj_type = obj_type
     self.parent_id = parent_id
     self.context = ContextHelper(__name__)
     # Session name is baked into every generated object name.
     self.session = self.context.get_config().get_session().name
Пример #5
0
    def __init__(self, node_count, bigip, *args, **kwargs):
        """ Object initialization.

            @param node_count: The number of ADC nodes to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
        """
        super(CreateAdcNodeObjects, self).__init__(*args, **kwargs)
        self.node_count = node_count
        self.bigip = bigip
        # Sequence number used when building unique node names.
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        # Generator of sequential IPv4 addresses for the node definitions.
        self.ip_gen = ipv4_address_generator()
Пример #6
0
 def __init__(self, handle, type=None):
     """Build the variable lookup map for templated device references.

     @param handle: Requested handle.
       NOTE(review): the original assignment is commented out below and the
       handle is hard-coded to 0 — confirm this is intentional.
     @param type: Optional handle type (name shadows the builtin; kept for
       interface compatibility).
     """
     #self.handle = handle
     self.handle = 0
     self.type = type
     context = ContextHelper()
     self.cfgifc = context.get_config()
     # NOTE(review): 'rr' is assigned but never used in this fragment.
     rr = self.cfgifc.get_ranges()
     self.pools = self.cfgifc.get_respools()
     self.value_to_item = {}
     # Map ITE-style variable names (e.g. f5ite/bigip.1/mgmt/ip) to the
     # management address of each BIG-IP in the config.
     for device in self.cfgifc.get_devices(KIND_TMOS_BIGIP):
         #f5ite/bigip.1/mgmt/ip
         self._handle_vars['f5ite/{}.{}/mgmt/ip'.format(
             *device.alias.split('-'))] = device.address
Пример #7
0
    def __init__(self, sshifc, data, mode, root=None):
        """Thread that collects cores (and qkviews) from one device.

        @param sshifc: Open SSH interface to the target device.
        @param data: Shared container where collection results are recorded.
        @param mode: qkview collection mode.
          NOTE(review): appears to be one of the QKVIEW.* constants — confirm.
        @param root: Optional root directory overriding the session directory.
        """
        super(CoreCollector, self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()
Пример #8
0
class CreateAdcNodeObjects(IcontrolRestCommand):  # @IgnorePep8
    """ Create the specified number of ADC Node objects on the BIG-IQ for
        the specified BIG-IP.  Works for BIG-IQ 4.6.0 and later.

        You must deploy the ADC objects from the BIG-IQ to the BIG-IP(s) with
        a separate call.
    """
    def __init__(self, node_count, bigip, *args, **kwargs):
        """ Object initialization.

            @param node_count: The number of ADC nodes to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
        """
        super(CreateAdcNodeObjects, self).__init__(*args, **kwargs)
        self.node_count = node_count
        self.bigip = bigip
        # Sequence number used when building unique node names.
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        # Generator of sequential IPv4 addresses for the node definitions.
        self.ip_gen = ipv4_address_generator()

    def run(self):
        """ Generate the specified number of ADC Node objects for the given
            BIG-IP in the default BIG-IQ.

            @return: List of Node Names that were generated, a list of
            Node Addresses that were generated, and the Self-IP links for
            each node generated.  These are needed when generating Pools and
            Virtual Servers.
        """
        LOG.info("Creating {0} node(s) in the BigIQ working config...".format(
            self.node_count))
        node_names = []
        node_addresses = []
        node_selflinks = []

        for _ in range(self.node_count):
            # Two counters go into each name: 'num' and 'object_counter' are
            # both drawn from the global NEXT_NUMBER sequence per iteration.
            num = NEXT_NUMBER.get_next_number()
            self.object_counter = NEXT_NUMBER.get_next_number()
            node_name = 'ScaleNode-%s-device%d-obj%d' %\
                        (self.cfgifc.get_session().name, num,
                         self.object_counter)
            node_names.append(node_name)
            node_address = next(self.ip_gen)
            node_addresses.append(node_address)

            payload = WorkingLtmNode(name=node_name,
                                     address=node_address,
                                     partition=PARTITION)
            # Tie the node to the BIG-IP via its machineId in the ADC group.
            payload.deviceReference.set(
                'https://localhost' + DeviceResolver.DEVICE_URI %
                (DEFAULT_ADC_GROUP, self.bigip['machineId']))
            create_node_resp = self.api.post(WorkingLtmNode.URI, payload)
            # Self-links are returned so pools/VIPs can reference the nodes.
            node_selflinks.append(create_node_resp.selfLink)

        return node_names, node_addresses, node_selflinks
Пример #9
0
    def __init__(self, vip_count, bigip, pool_names, pool_selflinks, *args,
                 **kwargs):
        """ Object initialization.

            @param vip_count: The number of ADC VIPs to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param pool_names: Names of the Pools previously created.
            @param pool_selflinks: List of Pool Selflinks to use to link up
            with the VIPs.
        """
        super(AdcVipObjectsCreate, self).__init__(*args, **kwargs)
        self.vip_count = vip_count
        self.bigip = bigip
        # Sequence number used when building unique VIP names.
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.pool_names = pool_names
        self.pool_selflinks = pool_selflinks
        # Generator of sequential IPv4 addresses for VIP destinations.
        self.ip_gen = ipv4_address_generator()
Пример #10
0
    def finalize(self, result):
        """Kick off stats collection threads at the end of the run.

        Collects from every BIG-IQ and from every Nth BIG-IP (N defaults
        to 10, configurable via the 'multiple' option), then waits for all
        collectors to finish.
        """
        pool = []
        context = ContextHelper(__name__)

        multiple = self.options.get('multiple', 10)
        count = 0

        # Sort devices by the numeric value of their IPv4 address so the
        # sampling of every Nth BIG-IP is deterministic.
        sorted_duts = sorted(self.data.duts, key=lambda ip: struct.
                             unpack("!L", inet_aton(str(ip.device.address)))[0])
        for dut in sorted_duts:
            v = context.get_icontrol(device=dut.device).version
            # Always collect from BIG-IQs; only from every Nth BIG-IP
            # (the first BIG-IP is included since count starts at 0).
            if v.product.is_bigiq or count % multiple == 0:
                t = ScaleStatsCollector(dut.device, self.data)
                t.start()
                pool.append(t)

            if v.product.is_bigip:
                count += 1

        # Give each collector a little more than the SSH timeout to finish.
        for t in pool:
            t.join(TIMEOUT + 10)
Пример #11
0
class CoresStop(ExtendedPlugin):
    """
    Open a SSH connection and keep it alive during the entire run.
    Check for existence of cores on the default DUT after each test.
    If found, raise the shouldStop flag and stop end the test execution.

    Caveats:
    If all tests up until that point passed the run will show a 100% pass rate.
    Tests that were not run will not show up in the report
    """
    enabled = False
    score = 501  # Needs to be higher than the other report plugins that depend on it

    def options(self, parser, env):
        """Register commandline options."""
        parser.add_option(
            '--stop-on-core',
            action='store_true',
            dest='stop_on_core',
            default=False,
            help="Enable stopping when first core is found. (default: no)")

    def configure(self, options, noseconfig):
        """ Call the super and then validate and call the relevant parser for
        the configuration file passed in """
        super(CoresStop, self).configure(options, noseconfig)
        self.context = ContextHelper()
        self.enabled = noseconfig.options.stop_on_core

    def afterTest(self, test):
        """Check for cores after each test; flag the run to stop if found."""
        sshifc = self.context.get_ssh()
        if SCMD.ssh.cores_exist(ifc=sshifc):
            LOG.error('Cores found after %s.' % test.id())
            self.result.shouldStop = True

    def prepareTestResult(self, result):
        """Keep a handle on the result object so afterTest can stop the run."""
        self.result = result

    def finalize(self, result):
        """Release the interfaces held open by the context."""
        self.context.teardown()
Пример #12
0
class AdcVipObjectsCreate(IcontrolRestCommand):  # @IgnorePep8
    """ Create the specified number of ADC VIP objects on the BIG-IQ for
        the specified BIG-IP.  Works for BIG-IQ 4.6.0 and later.

        You must deploy the ADC objects from the BIG-IQ to the BIG-IP(s) with
        a separate call.
    """
    def __init__(self, vip_count, bigip, pool_names, pool_selflinks, *args,
                 **kwargs):
        """ Object initialization.

            @param vip_count: The number of ADC VIPs to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param pool_names: Names of the Pools previously created.
            @param pool_selflinks: List of Pool Selflinks to use to link up
            with the VIPs.
        """
        super(AdcVipObjectsCreate, self).__init__(*args, **kwargs)
        self.vip_count = vip_count
        self.bigip = bigip
        # Sequence number used when building unique VIP names.
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.pool_names = pool_names
        self.pool_selflinks = pool_selflinks
        # Generator of sequential IPv4 addresses for VIP destinations.
        self.ip_gen = ipv4_address_generator()

    def setup(self):
        """ Generate the specified number of ADC VIP objects for the given
            BIG-IP in the default BIG-IQ.

            NOTE(review): each VIP i references pool_names[i] and
            pool_selflinks[i], so vip_count must not exceed the length of
            those lists — confirm callers guarantee this.
        """
        LOG.info("Creating {0} VIP(s) in the BigIQ working config...".format(
            self.vip_count))
        # 'num' is drawn once; object_counter is advanced per VIP.
        num = NEXT_NUMBER.get_next_number()

        for i in range(self.vip_count):
            self.object_counter = NEXT_NUMBER.get_next_number()
            vip_name = 'ScaleVip-%s-device%d-obj%d' %\
                       (self.cfgifc.get_session().name, num,
                        self.object_counter)
            vip_address = next(self.ip_gen)
            payload = WorkingLtmVip(name=vip_name,
                                    destination=vip_address + ':80',
                                    fullPath='/' + PARTITION + '/' + vip_name,
                                    partition=PARTITION,
                                    pool='/' + PARTITION + '/' +
                                    self.pool_names[i])
            # Tie the VIP to the BIG-IP via its machineId in the ADC group.
            payload.deviceReference.set(
                'https://localhost' + DeviceResolver.DEVICE_URI %
                (DEFAULT_ADC_GROUP, self.bigip['machineId']))
            payload.poolReference.set(self.pool_selflinks[i])
            self.api.post(WorkingLtmVip.URI, payload)
Пример #13
0
    def setup(self):
        """Promote the HA peer to primary and demote the current default.

        Swaps the 'default' flag between the current default and the peer.
        In active/standby mode it issues a SET_PRIMARY failover command for
        the peer's machineId and waits for restjavad to bounce.
        """
        super(HAPromoteStage, self).setup()
        LOG.info('Promotion stage for: %s', self.default)

        self.default.specs.default = False
        self.peer.specs.default = True

        LOG.info("old default = %s", self.default)
        LOG.info("new default = %s", self.peer)

        # if this is active/standby, promote, otherwise, not needed.
        if self.ha_passive:
            # Prepare command to send to promote
            payload = Options()
            payload.command = 'SET_PRIMARY'

            LOG.info("Picking up the list of peers from the new primary")
            context = ContextHelper(__name__)
            rest = context.get_icontrol_rest(device=self.peer).api
            resp = rest.get(DeviceResolver.DEVICES_URI %
                            DEFAULT_ALLBIGIQS_GROUP)

            # Look for the machine id of the peer to promote
            for item in resp['items']:
                if item.address == self.peer.get_discover_address():
                    payload.machineId = item.machineId
                    LOG.info("Promoting peer to primary from peer")
                    rest.post(FailoverState.URI, payload=payload)

            # wait for restjavad to go down...
            # (negated wait: succeeds once the device list query stops
            # returning items, i.e. the REST daemon went down)
            wait(lambda: rest.get(DeviceResolver.DEVICES_URI %
                                  DEFAULT_ALLBIGIQS_GROUP)['items'],
                 negated=True,
                 progress_cb=lambda ret:
                 'Waiting for restjavad on {0} to go down.'.format(self.default
                                                                   ))
            # wait for it to come back up
            RCMD.system.wait_restjavad([self.peer])
Пример #14
0
    def configure(self, options, noseconfig):
        """ Call the super and then validate and call the relevant parser for
        the configuration file passed in """
        super(Cores, self).configure(options, noseconfig)
        # Per-plugin data container shared through the context.
        self.data = ContextHelper().set_container(PLUGIN_NAME)
        # Enabled unless qkview collection is explicitly set to NEVER.
        self.enabled = not (noseconfig.options.with_qkview.upper()
                            == QKVIEW.NEVER)

        # There's really no point in collecting just quickviews without other logs
        # For now, disable this plugin if --no-logcollect is present.
        self.enabled = self.enabled and not (noseconfig.options.no_logcollect)

        self.data.cores = {}
        self.blocked_contexts = {}
Пример #15
0
    def reset_all(self):
        """Wipe HA storage on all BIG-IQs and re-establish discovery state.

        Wipes storage over SSH, waits for restjavad, re-sets the discovery
        address where it no longer matches (e.g. after an IPv6 run), and
        applies version-specific BZ workarounds.
        """
        group = RCMD.device.DEFAULT_ALLBIGIQS_GROUP
        for device in [self.default] + self.peers:
            with SSHInterface(device=device) as sshifc:
                LOG.info('Wiping storage on {0}'.format(device))
                SCMD.ssh.generic(SCMD.bigiq.ha.HA_WIPE_COMMAND, ifc=sshifc)

        # NOTE(review): 'device' here is the loop variable left over from the
        # loop above (the last peer) — confirm the restjavad wait is really
        # meant to go through that device's interface.
        with EmapiInterface(device=device, auth=AUTH.BASIC) as rstifc:
            RCMD.system.wait_restjavad([self.default] + self.peers, ifc=rstifc)

        # For IPv6 runs where localhost will get reset to IPv4.
        for device in [self.default] + self.peers:
            with EmapiInterface(device=device, auth=AUTH.BASIC) as rstifc:
                resp = rstifc.api.get(DeviceResolver.DEVICES_URI % group)
                selfip_expect = device.get_discover_address()
                selfips_actual = [x.address for x in resp['items']]
                if selfip_expect not in selfips_actual:
                    LOG.info(
                        "selfip mismatch. Setting {0}".format(selfip_expect))
                    self_addr = IPAddress(selfip_expect)
                    payload = NetworkDiscover()
                    payload.discoveryAddress = self_addr.format(ipv6_full)
                    rstifc.api.put(NetworkDiscover.URI, payload=payload)
                    DeviceResolver.wait(rstifc.api, group)

        # For BZ workarounds..

        bigips = []
        context = ContextHelper()
        default_bigiq = context.get_icontrol(device=self.default).version
        session = context.get_config().get_session().name

        # Collect all BIG-IPs at 11.3.0 or later for the cert cleanup below.
        for device in context.get_config().get_devices():
            v = context.get_icontrol(device=device).version
            if v.product.is_bigip and v >= 'bigip 11.3.0':
                bigips.append(device)

        # Workarounds apply only to the BIG-IQ 4.4.x range.
        if default_bigiq > 'bigiq 4.3.0' and default_bigiq < 'bigiq 4.5.0':
            with EmapiInterface(device=self.default,
                                auth=AUTH.BASIC) as rstifc:
                RCMD.device.clean_dg_certs(bigips, ifc=rstifc)

            with EmapiInterface(device=self.default,
                                auth=AUTH.BASIC) as rstifc:
                RCMD.system.bz_help1([self.default] + self.peers, ifc=rstifc)

        if default_bigiq > 'bigiq 4.3.0' and default_bigiq < 'bigiq 4.5.0':
            with SSHInterface(device=self.default) as sshifc:
                SCMD.bigiq.ha.wait_ha_peer(self.peers,
                                           session=session,
                                           ifc=sshifc)
Пример #16
0
class GenerateObjectName(Macro):
    """Produce unique, human-readable names for generated config objects.

    Names have the form '<obj_type>-<session>-<parent_id>-obj<N>' where N is
    a counter shared by every instance of this class.
    """
    # Class-wide counter: shared across instances so names stay unique
    # within the process.
    object_counter = 0

    def __init__(self, obj_type='Object', parent_id='name', *args, **kwargs):
        """
        Arguments are not necessary, but can help communicate information
        about the object at a glance.

        @param obj_type: Type of the object for which the name is being
        generated, e.g. 'Pool'.
        @param parent_id: Unique identifier of the parent object, e.g.
        machineId, IP address, etc.
        @return: generated name
        """
        super(GenerateObjectName, self).__init__(*args, **kwargs)
        self.obj_type = obj_type
        self.parent_id = parent_id
        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session().name

    @classmethod
    def increment_counter(cls):
        """Advance the class-wide name counter by one."""
        cls.object_counter = cls.object_counter + 1

    def run(self):
        """Return the next generated object name."""
        self.increment_counter()
        parts = (self.obj_type, self.session, self.parent_id,
                 self.object_counter)
        return '%s-%s-%s-obj%s' % parts
Пример #17
0
def run_playbooks(playbook, tags=None, context=None, options=None):
    """
    Run one or more Ansible playbooks against the devices in the test config.

    @param playbook: The playbook(s) to be run.
    @type playbook: str or iterable
    @param tags: Run only plays tagged with these (or).  None means no tags.
    @type tags: list
    @param context: The nose context where the playbook(s) will be executed
    @type context: instance
    @param options: Optional Ansible option overrides; a 'logger_level'
    entry is applied to the display and removed before the merge.
    @return: OptionsStrict carrying the executor return code ('rc') and the
    list of failures collected by the callback ('failed').
    """
    # BUGFIX: a mutable default argument ([]) is shared between calls;
    # use None as the sentinel instead.
    if tags is None:
        tags = []

    cfgifc = ContextHelper().get_config()
    LOG.debug('In run_playbooks(%s)...', playbook)

    # Really not liking how variables are called constants and how there are N
    # ways of assigning them.
    C.DEFAULT_ROLES_PATH = [os.path.expanduser('~/.ansible/roles'),
                            os.path.join(HERE, 'roles')]
    C.RETRY_FILES_ENABLED = False
    C.DEFAULT_HASH_BEHAVIOUR = 'merge'
    C.ANSIBLE_PIPELINING = True
    action_loader.add_directory(os.path.join(HERE, 'action_plugins'))
    lookup_loader.add_directory(os.path.join(HERE, 'lookup_plugins'))
    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources='/dev/null')
    variable_manager = VariableManager(loader, inventory)
    o = OptionsStrict(connection='smart', forks=10, become=None,
                      become_method=None, become_user=None, check=False,
                      listhosts=False, listtasks=False, listtags=False,
                      syntax=False, module_path=[os.path.join(HERE, 'library')],
                      diff=False,  # tags=tags,
                      verbosity=1, timeout=1,
                      )

    # BUGFIX: options defaults to None; the original dereferenced
    # options.logger_level before checking options, crashing on the default.
    if options:
        if options.logger_level:
            display.logger_level = options.pop('logger_level')
        o.update(options)

    passwords = dict(vault_pass='******')
    display.verbosity = o.verbosity

    inventory.add_group('all')
    a = inventory.groups['all']
    a.set_variable(VAR_F5TEST_CONFIG, cfgifc.api)
    a.set_variable('f5test_itemd', {})
    if context:
        tmp = nose_selector(context)
        address = test_address(context)
        a.set_variable('f5test_module', tmp.replace(':', '.'))
        # ITE compatibility: templates can refer to metadata values (e.g. TCID)
        if hasattr(context, ITE_METADATA):
            a.set_variable('f5test_itemd', getattr(context, ITE_METADATA))
        if address[1]:
            name = address[1].rsplit('.')[-1]
            a.set_variable('f5test_module_name', name)
    a.set_variable('playbook_name', os.path.splitext(os.path.basename(playbook))[0])

    # These 'all'-group variables do not depend on the device; set them once
    # instead of once per device (they were loop-invariant).
    session = cfgifc.get_session()
    fingerprint = session.get_fingerprint(hash=True)
    for tag in tags:
        a.set_variable(tag, True)
    a.set_variable('f5test_session', OptionsStrict(name=session.name,
                                                   name_md5=session.name_md5))
    a.set_variable('f5test_respools', session.get_respool_handler().pools)
    a.set_variable('f5test_utils', JinjaUtils())
    a.set_variable('f5test_ranges', session.get_respool_handler().ranges)
    a.set_variable('machine_fingerprint', fingerprint)

    for device in cfgifc.get_devices(KIND_ANY):
        # Build the nested group chain from the device kind bits
        # (e.g. 'tmos' -> 'tmos.bigip').
        prev = a
        name = ''
        for sub_kind in device.kind.bits:
            name += sub_kind
            inventory.add_group(name)
            prev.add_child_group(inventory.groups[name])
            prev = inventory.groups[name]
            name += '.'

        # Colon must mean something for Ansible
        if device.alias != 'localhost':
            inventory.add_host(device.alias, str(device.kind).replace(':', '.'))
        h = inventory.get_host(device.alias)
        h.set_variable('f5test_device', device)
        h.set_variable('f5test_kind', device.kind)
        h.set_variable('f5test_mgmt_address', device.get_address())
        h.set_variable('f5test_port_https', device.ports['https'])
        h.set_variable('f5test_username', device.get_admin_creds().username)
        h.set_variable('f5test_password', device.get_admin_creds().password)
        h.set_variable('ansible_host', device.get_discover_address())
        h.set_variable('ansible_ssh_port', device.ports['ssh'])
        h.set_variable('ansible_user', device.get_root_creds().username)
        h.set_variable('ansible_ssh_pass', device.get_root_creds().password)
        for spec, v in device.specs.get(HOST_VARS, {}).items():
            h.set_variable(spec, v)

        for group in device.groups:
            inventory.add_group(group)
            g = inventory.groups[group]
            a.add_child_group(g)
            g.add_host(h)

    names = [playbook] if isinstance(playbook, str) else playbook

    for g, v in cfgifc.api.get(GROUP_VARS, {}).items():
        group = inventory.groups.get(g)
        if group:
            group.vars.update(v)

    # Look for playbooks relative to caller's base directory
    frame = inspect.stack()[1]
    module = inspect.getmodule(frame[0])
    here = os.path.dirname(os.path.abspath(module.__file__))

    playbooks = [x if os.path.isabs(x) else os.path.join(here, FIXTURES_DIR, x)
                 for x in names]

    executor = PlaybookExecutor(
        playbooks=playbooks,
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        options=o,
        passwords=passwords)

    # Use a fresh name instead of rebinding the 'options' parameter.
    result = OptionsStrict(rc=-1, failed=[])
    cb = MyCallback(options=result)
    executor._tqm._callback_plugins.append(cb)
    try:
        result.rc = executor.run()
    finally:
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    # p.vips.sync()
    return result
Пример #18
0
class CreateAdcPoolObjects(IcontrolRestCommand):  # @IgnorePep8
    """ Create the specified number of ADC Pool objects on the BIG-IQ for
        the specified BIG-IP.  Works for BIG-IQ 4.6.0 and later.

        You must deploy the ADC objects from the BIG-IQ to the BIG-IP(s) with
        a separate call.
    """
    def __init__(self, pool_count, pool_member_count, bigip, node_names,
                 node_addresses, node_selflinks, *args, **kwargs):
        """ Object initialization.

            @param pool_count: The number of ADC pools to create.
            @param pool_member_count: The number of members per ADC pool to
            create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param node_names: List of Node Names to use to link up with
            the pool members.
            @param node_addresses: List of Node IP Addresses to use to link
            up with the pool members.
            @param node_selflinks: List of Node Selflinks to use to link up
            with the pool members.
        """
        super(CreateAdcPoolObjects, self).__init__(*args, **kwargs)
        self.pool_count = pool_count
        self.pool_member_count = pool_member_count
        self.bigip = bigip
        # Sequence number used when building unique object names.
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.node_names = node_names
        self.node_addresses = node_addresses
        self.node_selflinks = node_selflinks
        self.ip_gen = ipv4_address_generator()

    def run(self):
        """ Generate the specified number of ADC Pool objects for the given
            BIG-IP in the default BIG-IQ.

            NOTE(review): pool i references node_names[i], node_addresses[i]
            and node_selflinks[i], so pool_count must not exceed the length
            of those lists — confirm callers guarantee this.

            @returns: List of Pool names and list of Pool selflinks that
            were generated.
        """
        LOG.info("Creating {0} pool(s) in the BigIQ working config...".format(
            self.pool_count))
        pool_names = []
        pool_selflinks = []
        # 'num' is drawn once; object_counter advances per pool and member.
        num = NEXT_NUMBER.get_next_number()

        for i in range(self.pool_count):
            self.object_counter = NEXT_NUMBER.get_next_number()
            pool_name = 'ScalePool-%s-device%d-obj%d' %\
                        (self.cfgifc.get_session().name, num,
                         self.object_counter)
            pool_names.append(pool_name)
            payload = WorkingLtmPool(name=pool_name,
                                     fullPath='/' + PARTITION + '/' +
                                     pool_name,
                                     partition=PARTITION,
                                     loadBalancingMode='round-robin')
            # Tie the pool to the BIG-IP via its machineId in the ADC group.
            payload.deviceReference.set(
                'https://localhost' + DeviceResolver.DEVICE_URI %
                (DEFAULT_ADC_GROUP, self.bigip['machineId']))
            create_pool_resp = self.api.post(WorkingLtmPool.URI, payload)
            pool_selflinks.append(create_pool_resp.selfLink)

            # Members share the pool's node; each gets the next port number.
            pool_member_port = 0
            for _ in range(self.pool_member_count):
                self.object_counter = NEXT_NUMBER.get_next_number()
                pool_member_port += 1
                # Will need reworking if we decide we need more than 65k
                # members per pool
                pool_member_name = '{0}:{1}'.format(self.node_names[i],
                                                    pool_member_port)
                payload = WorkingLtmPoolMember(name=pool_member_name,
                                               address=self.node_addresses[i],
                                               fullPath='/' + PARTITION + '/' +
                                               pool_member_name,
                                               partition=PARTITION)
                payload.nodeReference.set(self.node_selflinks[i])
                self.api.post(WorkingLtmPoolMember.URI % create_pool_resp.id,
                              payload)
        return pool_names, pool_selflinks
Пример #19
0
class CoreCollector(Thread):
    """Background thread that harvests cores and qkviews from one device."""

    def __init__(self, sshifc, data, mode, root=None):
        """
        @param sshifc: Open SSH interface to the target device.
        @param data: Shared container; run() records findings in data.cores.
        @param mode: qkview collection mode (QKVIEW.ALWAYS / ON_FAIL, per run()).
        @param root: Optional root directory overriding the session directory.
        """
        super(CoreCollector,
              self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()

    def _get_session_dir(self):
        """Return the session output directory, creating it if missing."""
        path = self.session.path

        if path and not os.path.exists(path):
            # Drop the umask so the directory is created world-accessible.
            oldumask = os.umask(0)
            os.makedirs(path)
            os.umask(oldumask)

        return path

    def _get_or_create_dirs(self, name, root=None):
        """Resolve *name* under *root* (or the session dir) and create it.

        Relative roots are anchored at the session directory.  Returns a
        (path, created) tuple, where created tells whether the directory
        was newly made by this call.
        """
        if root is None:
            root = self._get_session_dir()
        else:
            if not os.path.isdir(root):
                root = os.path.join(self._get_session_dir(), root)

        created = False
        path = os.path.join(root, name)

        # Windows-based NAS doesn't support :'s in names
        path = path.replace(':', '@')
        if not os.path.exists(path):
            oldumask = os.umask(0)
            os.makedirs(path)
            os.umask(oldumask)
            created = True

        return path, created

    def run(self):
        """Download any cores (always) and a qkview (mode-dependent)."""
        LOG.info('Looking for cores...')
        d = self.data.cores
        d.data = {}
        d.checked = time.time()

        with self.sshifc as sshifc:
            if SCMD.ssh.cores_exist(ifc=sshifc):
                LOG.info('Cores found!')
                cores_dir, _ = self._get_or_create_dirs(
                    "%s/%s" % (CORES_DIR, sshifc.address), self.root)

                # Pull the cores off the box, then remove them so the next
                # check starts clean.
                SCMD.ssh.scp_get(ifc=sshifc,
                                 source='/var/core/*',
                                 destination=cores_dir)
                sshifc.api.run('rm -f /var/core/*')

                # Add read permissions to group and others.
                with ShellInterface(shell=True) as shell:
                    shell.api.run('chmod -R go+r %s' % cores_dir)
                if sshifc.device:
                    d.data[sshifc.device.get_alias()] = True

            # Generate a qkview either always or only when the test run failed.
            if self.mode == QKVIEW.ALWAYS or \
               (self.mode == QKVIEW.ON_FAIL and self.data.test_result and
                    not self.data.test_result.wasSuccessful()):
                try:
                    LOG.info("Generating qkview...")
                    ret = SCMD.ssh.generic('qkview', ifc=sshifc)
                    # qkview prints the output file path on stderr.
                    name = re.search('^/var/.+$', ret.stderr,
                                     flags=re.M).group(0)

                    LOG.info("Downloading qkview...")
                    qk_dir, _ = self._get_or_create_dirs(
                        "%s/%s" % (QKVIEWS_DIR, sshifc.address), self.root)

                    SCMD.ssh.scp_get(ifc=sshifc,
                                     source=name,
                                     destination=qk_dir)
                    if sshifc.api.exists(SCF_FILENAME):
                        SCMD.ssh.scp_get(ifc=sshifc,
                                         source=SCF_FILENAME,
                                         destination=qk_dir)

                except SSHTimeoutError:
                    LOG.warning('Could not complete qkview on %s',
                                sshifc.address)
Пример #20
0
class CoreCollector(Thread):
    """Background thread that harvests diagnostics from one device over SSH.

    Downloads any core files found in /var/core (then deletes them on the
    device), and — depending on *mode* — generates and downloads a qkview
    bundle. Artifacts are stored under the session directory.
    """

    def __init__(self, sshifc, data, mode, root=None):
        """
        @param sshifc: SSH interface to the target device (used as a
            context manager in run()).
        @param data: Shared result object; ``data.cores`` is populated and
            ``data.test_result`` is consulted for the ON_FAIL qkview mode.
        @param mode: One of the QKVIEW.* collection modes.
        @param root: Optional directory (absolute, or relative to the
            session directory) under which artifacts are stored.
        """
        super(CoreCollector,
              self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()

    def _makedirs_world_accessible(self, path):
        """Create *path* (and parents) with mode bits unmasked by umask.

        Shared helper for the umask dance previously duplicated in
        _get_session_dir() and _get_or_create_dirs().
        """
        oldumask = os.umask(0)
        try:
            try:
                os.makedirs(path)
            except OSError:
                # Another collector thread may have created the directory
                # between our existence check and makedirs(); only swallow
                # the error when the directory is actually there now.
                if not os.path.isdir(path):
                    raise
        finally:
            # BUG FIX: restore the process umask even when makedirs()
            # fails; previously a failure left the umask stuck at 0.
            os.umask(oldumask)

    def _get_session_dir(self):
        """Return the session directory path, creating it if missing."""
        path = self.session.path

        if path and not os.path.exists(path):
            self._makedirs_world_accessible(path)

        return path

    def _get_or_create_dirs(self, name, root=None):
        """Return (path, created) for directory *name* under *root*.

        @param name: Subdirectory path; any ':' is replaced with '@'
            because Windows-based NAS doesn't support colons in names.
        @param root: Base directory. None means the session directory; a
            non-existing value is treated as relative to the session dir.
        @return: Tuple of the final path and whether it was newly created.
        """
        if root is None:
            root = self._get_session_dir()
        elif not os.path.isdir(root):
            root = os.path.join(self._get_session_dir(), root)

        created = False
        path = os.path.join(root, name)

        # Windows-based NAS doesn't support :'s in names
        path = path.replace(':', '@')
        if not os.path.exists(path):
            self._makedirs_world_accessible(path)
            created = True

        return path, created

    def run(self):
        """Collect cores (always checked) and a qkview (per self.mode)."""
        LOG.info('Looking for cores...')
        d = self.data.cores
        d.data = {}
        d.checked = time.time()

        with self.sshifc as sshifc:
            if SCMD.ssh.cores_exist(ifc=sshifc):
                LOG.info('Cores found!')
                cores_dir, _ = self._get_or_create_dirs(
                    "%s/%s" % (CORES_DIR, sshifc.address), self.root)

                SCMD.ssh.scp_get(ifc=sshifc, source='/var/core/*',
                                 destination=cores_dir)
                # Remove collected cores so the next run starts clean.
                sshifc.api.run('rm -f /var/core/*')

                # Add read permissions to group and others.
                with ShellInterface(shell=True) as shell:
                    shell.api.run('chmod -R go+r %s' % cores_dir)
                if sshifc.device:
                    d.data[sshifc.device.get_alias()] = True

            if self.mode == QKVIEW.ALWAYS or \
               (self.mode == QKVIEW.ON_FAIL and self.data.test_result and
                    not self.data.test_result.wasSuccessful()):
                try:
                    LOG.info("Generating qkview...")
                    ret = SCMD.ssh.generic('qkview', ifc=sshifc)
                    # qkview reports the output file path on stderr.
                    name = re.search('^/var/.+$', ret.stderr,
                                     flags=re.M).group(0)

                    LOG.info("Downloading qkview...")
                    qk_dir, _ = self._get_or_create_dirs(
                        "%s/%s" % (QKVIEWS_DIR, sshifc.address), self.root)

                    SCMD.ssh.scp_get(ifc=sshifc, source=name,
                                     destination=qk_dir)
                    if sshifc.api.exists(SCF_FILENAME):
                        SCMD.ssh.scp_get(ifc=sshifc, source=SCF_FILENAME,
                                         destination=qk_dir)

                except SSHTimeoutError:
                    LOG.warning('Could not complete qkview on %s',
                                sshifc.address)
# Пример #21 (0)
 def configure(self, options, noseconfig):
     """Configure the plugin from parsed command-line options.

     Calls the superclass configure(), creates a ContextHelper, and
     stores the stop_on_core option flag as self.enabled, so the plugin
     is active only when that option was requested.

     @param options: Parsed command-line options.
     @param noseconfig: The nose configuration object carrying
         plugin-specific options (stop_on_core is read from it).
     """
     super(CoresStop, self).configure(options, noseconfig)
     self.context = ContextHelper()
     # Enabled strictly by the user-supplied stop_on_core option.
     self.enabled = noseconfig.options.stop_on_core