Example #1
    def __init__(self, pool_count, pool_member_count, bigip, node_names,
                 node_addresses, node_selflinks, *args, **kwargs):
        """ Object initialization.

            @param pool_count: The number of ADC pools to create.
            @param pool_member_count: The number of members per ADC pool to
            create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param node_names: List of Node Names to use to link up with
            the pool members.
            @param node_addresses: List of Node IP Addresses to use to link
            up with the pool members.
            @param node_selflinks: List of Node Selflinks to use to link up
            with the pool members.
        """
        super(CreateAdcPoolObjects, self).__init__(*args, **kwargs)
        self.pool_count = pool_count
        self.pool_member_count = pool_member_count
        self.bigip = bigip
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.node_names = node_names
        self.node_addresses = node_addresses
        self.node_selflinks = node_selflinks
        self.ip_gen = ipv4_address_generator()
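The constructor above only records its inputs; the pools themselves are created later in the task. A minimal instantiation sketch, assuming the node lists came from an earlier node-creation step such as the one in Example #5 (all variable names and counts below are placeholders, not part of the original code):

# Hypothetical call site (placeholder values): 10 pools with 5 members each,
# wired up to nodes created earlier on the same BIG-IP.
task = CreateAdcPoolObjects(pool_count=10,
                            pool_member_count=5,
                            bigip=bigip,
                            node_names=node_names,
                            node_addresses=node_addresses,
                            node_selflinks=node_selflinks)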
Example #2
    def __init__(self, sshifc, data, mode, root=None):
        super(CoreCollector,
              self).__init__(name='CoreCollector@%s' % sshifc.address)
        self.sshifc = sshifc
        self.data = data
        self.mode = mode
        self.root = root

        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session()
Example #3
    def __init__(self, obj_type='Object', parent_id='name', *args, **kwargs):
        """
        Arguments are not necessary, but can help communicate information about the object at a glance.
        @param obj_type: Type of the object for which the name is being generated. e.g. 'Pool'
        @param parent_id: Unique identifier of the parent object, e.g. machineId, IP address, etc.
        @return: generated name
        """
        super(GenerateObjectName, self).__init__(*args, **kwargs)
        self.obj_type = obj_type
        self.parent_id = parent_id
        self.context = ContextHelper(__name__)
        self.session = self.context.get_config().get_session().name
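Only the inputs and the session name are stored here; the snippet does not show how the final name is assembled. Purely as an illustration of the kind of string such a generator could produce (the composition below is an assumption, not the library's actual algorithm):

def example_object_name(obj_type, parent_id, session, counter=0):
    # Hypothetical helper, not part of f5test: one plausible way to combine
    # the stored pieces into a readable, unique object name.
    return '{0}-{1}-{2}-{3}'.format(obj_type, session, parent_id, counter)

# example_object_name('Pool', '10.11.12.13', 'session-1') -> 'Pool-session-1-10.11.12.13-0'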
Example #4
    def __init__(self, handle, type=None):
        #self.handle = handle
        self.handle = 0
        self.type = type
        context = ContextHelper()
        self.cfgifc = context.get_config()
        rr = self.cfgifc.get_ranges()
        self.pools = self.cfgifc.get_respools()
        self.value_to_item = {}
        # Assumed initialization: _handle_vars is populated below but is not
        # shown being defined anywhere else in this snippet.
        self._handle_vars = {}
        # Map each BIG-IP device alias to its management address,
        # e.g. alias 'bigip-1' -> key 'f5ite/bigip.1/mgmt/ip'
        for device in self.cfgifc.get_devices(KIND_TMOS_BIGIP):
            self._handle_vars['f5ite/{}.{}/mgmt/ip'.format(
                *device.alias.split('-'))] = device.address
Example #5
    def __init__(self, node_count, bigip, *args, **kwargs):
        """ Object initialization.

            @param node_count: The number of ADC nodes to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
        """
        super(CreateAdcNodeObjects, self).__init__(*args, **kwargs)
        self.node_count = node_count
        self.bigip = bigip
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.ip_gen = ipv4_address_generator()
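ipv4_address_generator() is called with no arguments here and consumed one address at a time by the task. Its implementation is not part of this example; below is a minimal standard-library sketch of what such a generator might look like (the base address and the sequential allocation policy are assumptions):

import itertools
from ipaddress import IPv4Address

def ipv4_address_generator(start='10.10.0.1'):
    # Hypothetical stand-in: yield consecutive IPv4 addresses as strings,
    # beginning at an arbitrary base address.
    base = int(IPv4Address(start))
    for offset in itertools.count():
        yield str(IPv4Address(base + offset))

# gen = ipv4_address_generator(); next(gen) -> '10.10.0.1'; next(gen) -> '10.10.0.2'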
Example #6
    def reset_all(self):
        group = RCMD.device.DEFAULT_ALLBIGIQS_GROUP
        for device in [self.default] + self.peers:
            with SSHInterface(device=device) as sshifc:
                LOG.info('Wiping storage on {0}'.format(device))
                SCMD.ssh.generic(SCMD.bigiq.ha.HA_WIPE_COMMAND, ifc=sshifc)

        with EmapiInterface(device=device, auth=AUTH.BASIC) as rstifc:
            RCMD.system.wait_restjavad([self.default] + self.peers, ifc=rstifc)

        # For IPv6 runs where localhost will get reset to IPv4.
        for device in [self.default] + self.peers:
            with EmapiInterface(device=device, auth=AUTH.BASIC) as rstifc:
                resp = rstifc.api.get(DeviceResolver.DEVICES_URI % group)
                selfip_expect = device.get_discover_address()
                selfips_actual = [x.address for x in resp['items']]
                if selfip_expect not in selfips_actual:
                    LOG.info(
                        "selfip mismatch. Setting {0}".format(selfip_expect))
                    self_addr = IPAddress(selfip_expect)
                    payload = NetworkDiscover()
                    payload.discoveryAddress = self_addr.format(ipv6_full)
                    rstifc.api.put(NetworkDiscover.URI, payload=payload)
                    DeviceResolver.wait(rstifc.api, group)

        # For BZ workarounds..

        bigips = []
        context = ContextHelper()
        default_bigiq = context.get_icontrol(device=self.default).version
        session = context.get_config().get_session().name

        for device in context.get_config().get_devices():
            v = context.get_icontrol(device=device).version
            if v.product.is_bigip and v >= 'bigip 11.3.0':
                bigips.append(device)

        if default_bigiq > 'bigiq 4.3.0' and default_bigiq < 'bigiq 4.5.0':
            with EmapiInterface(device=self.default,
                                auth=AUTH.BASIC) as rstifc:
                RCMD.device.clean_dg_certs(bigips, ifc=rstifc)

            with EmapiInterface(device=self.default,
                                auth=AUTH.BASIC) as rstifc:
                RCMD.system.bz_help1([self.default] + self.peers, ifc=rstifc)

        if default_bigiq > 'bigiq 4.3.0' and default_bigiq < 'bigiq 4.5.0':
            with SSHInterface(device=self.default) as sshifc:
                SCMD.bigiq.ha.wait_ha_peer(self.peers,
                                           session=session,
                                           ifc=sshifc)
Example #7
    def configure(self, options, noseconfig):
        """ Call the super and then validate and call the relevant parser for
        the configuration file passed in """
        super(Cores, self).configure(options, noseconfig)
        self.data = ContextHelper().set_container(PLUGIN_NAME)
        self.enabled = not (noseconfig.options.with_qkview.upper()
                            == QKVIEW.NEVER)

        # There's really no point in collecting just qkviews without other logs.
        # For now, disable this plugin if --no-logcollect is present.
        self.enabled = self.enabled and not (noseconfig.options.no_logcollect)

        self.data.cores = {}
        self.blocked_contexts = {}
Example #8
    def __init__(self, vip_count, bigip, pool_names, pool_selflinks, *args,
                 **kwargs):
        """ Object initialization.

            @param vip_count: The number of ADC VIPs to create.
            @param bigip: BIG-IP device, as returned by MachineIdResolver.
            @param pool_names: Names of the Pools previously created.
            @param pool_selflinks: List of Pool Selflinks to use to link up
            with the VIPs.
        """
        super(AdcVipObjectsCreate, self).__init__(*args, **kwargs)
        self.vip_count = vip_count
        self.bigip = bigip
        self.object_counter = 0
        self.context = ContextHelper(__name__)
        self.cfgifc = self.context.get_config()
        self.pool_names = pool_names
        self.pool_selflinks = pool_selflinks
        self.ip_gen = ipv4_address_generator()
Example #9
    def setup(self):
        super(HAPromoteStage, self).setup()
        LOG.info('Promotion stage for: %s', self.default)

        self.default.specs.default = False
        self.peer.specs.default = True

        LOG.info("old default = %s", self.default)
        LOG.info("new default = %s", self.peer)

        # if this is active/standby, promote, otherwise, not needed.
        if self.ha_passive:
            # Prepare command to send to promote
            payload = Options()
            payload.command = 'SET_PRIMARY'

            LOG.info("Picking up the list of peers from the new primary")
            context = ContextHelper(__name__)
            rest = context.get_icontrol_rest(device=self.peer).api
            resp = rest.get(DeviceResolver.DEVICES_URI %
                            DEFAULT_ALLBIGIQS_GROUP)

            # Look for the machine id of the peer to promote
            for item in resp['items']:
                if item.address == self.peer.get_discover_address():
                    payload.machineId = item.machineId
                    LOG.info("Promoting peer to primary from peer")
                    rest.post(FailoverState.URI, payload=payload)

            # wait for restjavad to go down...
            wait(lambda: rest.get(DeviceResolver.DEVICES_URI %
                                  DEFAULT_ALLBIGIQS_GROUP)['items'],
                 negated=True,
                 progress_cb=lambda ret:
                 'Waiting for restjavad on {0} to go down.'.format(self.default
                                                                   ))
            # wait for it to come back up
            RCMD.system.wait_restjavad([self.peer])
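The promotion request built above consists of just a command and the machineId of the peer being promoted. Roughly, the payload posted to FailoverState.URI has this shape (the machineId below is a placeholder; in the code it is looked up from the device-resolver response):

# Illustrative shape of the SET_PRIMARY payload posted above.
payload_example = {
    'command': 'SET_PRIMARY',
    'machineId': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',  # placeholder value
}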
Example #10
    def configure(self, options, noseconfig):
        """ Call the super and then validate and call the relevant parser for
        the configuration file passed in """
        super(CoresStop, self).configure(options, noseconfig)
        self.context = ContextHelper()
        self.enabled = noseconfig.options.stop_on_core
Example #11
def run_playbooks(playbook, tags=[], context=None, options=None):
    """
    @param playbook: The playbook(s) to be run.
    @type playbook: str or iterable
    @param tags: Run only plays tagged with any of these (OR semantics)
    @type tags: list
    @param context: The nose context where the playbook(s) will be executed
    @type context: instance
    """

    cfgifc = ContextHelper().get_config()
    LOG.debug('In run_playbooks(%s)...', playbook)

    # Really not liking how variables are called constants and how there are N
    # ways of assigning them.
    C.DEFAULT_ROLES_PATH = [os.path.expanduser('~/.ansible/roles'),
                            os.path.join(HERE, 'roles')]
    C.RETRY_FILES_ENABLED = False
    C.DEFAULT_HASH_BEHAVIOUR = 'merge'
    C.ANSIBLE_PIPELINING = True
    action_loader.add_directory(os.path.join(HERE, 'action_plugins'))
    lookup_loader.add_directory(os.path.join(HERE, 'lookup_plugins'))
    loader = DataLoader()
    inventory = InventoryManager(loader=loader, sources='/dev/null')
    variable_manager = VariableManager(loader, inventory)
    o = OptionsStrict(connection='smart', forks=10, become=None,
                      become_method=None, become_user=None, check=False,
                      listhosts=False, listtasks=False, listtags=False,
                      syntax=False, module_path=[os.path.join(HERE, 'library')],
                      diff=False,  # tags=tags,
                      verbosity=1, timeout=1,
                      )

    if options and options.logger_level:
        display.logger_level = options.pop('logger_level')

    if options:
        o.update(options)

    passwords = dict(vault_pass='******')
    display.verbosity = o.verbosity

    inventory.add_group('all')
    a = inventory.groups['all']
    a.set_variable(VAR_F5TEST_CONFIG, cfgifc.api)
    a.set_variable('f5test_itemd', {})
    if context:
        tmp = nose_selector(context)
        address = test_address(context)
        a.set_variable('f5test_module', tmp.replace(':', '.'))
        # ITE compatibility: templates can refer to metadata values (e.g. TCID)
        if hasattr(context, ITE_METADATA):
            a.set_variable('f5test_itemd', getattr(context, ITE_METADATA))
        if address[1]:
            name = address[1].rsplit('.')[-1]
            a.set_variable('f5test_module_name', name)
    a.set_variable('playbook_name', os.path.splitext(os.path.basename(playbook))[0])
    for device in cfgifc.get_devices(KIND_ANY):
        prev = a
        name = ''
        for sub_kind in device.kind.bits:
            name += sub_kind
            inventory.add_group(name)
            prev.add_child_group(inventory.groups[name])
            prev = inventory.groups[name]
            name += '.'

        fingerprint = cfgifc.get_session().get_fingerprint(hash=True)
        for tag in tags:
            a.set_variable(tag, True)
        session = cfgifc.get_session()
        a.set_variable('f5test_session', OptionsStrict(name=session.name,
                                                       name_md5=session.name_md5))
        a.set_variable('f5test_respools', session.get_respool_handler().pools)
        a.set_variable('f5test_utils', JinjaUtils())
        a.set_variable('f5test_ranges', session.get_respool_handler().ranges)
        a.set_variable('machine_fingerprint', fingerprint)
        # Colon must mean something for Ansible
        if device.alias != 'localhost':
            inventory.add_host(device.alias, str(device.kind).replace(':', '.'))
        h = inventory.get_host(device.alias)
        h.set_variable('f5test_device', device)
        h.set_variable('f5test_kind', device.kind)
        h.set_variable('f5test_mgmt_address', device.get_address())
        h.set_variable('f5test_port_https', device.ports['https'])
        h.set_variable('f5test_username', device.get_admin_creds().username)
        h.set_variable('f5test_password', device.get_admin_creds().password)
        h.set_variable('ansible_host', device.get_discover_address())
        h.set_variable('ansible_ssh_port', device.ports['ssh'])
        h.set_variable('ansible_user', device.get_root_creds().username)
        h.set_variable('ansible_ssh_pass', device.get_root_creds().password)
        for spec, v in device.specs.get(HOST_VARS, {}).items():
            h.set_variable(spec, v)

        for group in device.groups:
            inventory.add_group(group)
            g = inventory.groups[group]
            a.add_child_group(g)
            g.add_host(h)

    names = [playbook] if isinstance(playbook, str) else playbook

    for g, v in cfgifc.api.get(GROUP_VARS, {}).items():
        group = inventory.groups.get(g)
        if group:
            group.vars.update(v)

    # Look for playbooks relative to caller's base directory
    frame = inspect.stack()[1]
    module = inspect.getmodule(frame[0])
    here = os.path.dirname(os.path.abspath(module.__file__))

    playbooks = [x if os.path.isabs(x) else os.path.join(here, FIXTURES_DIR, x)
                 for x in names]

    executor = PlaybookExecutor(
        playbooks=playbooks,
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        options=o,
        passwords=passwords)

    options = OptionsStrict(rc=-1, failed=[])
    cb = MyCallback(options=options)
    executor._tqm._callback_plugins.append(cb)
    try:
        options.rc = executor.run()
    finally:
        shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)

    # p.vips.sync()
    return options
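run_playbooks() returns the OptionsStrict instance carrying the Ansible return code and any failed results, so callers typically check rc afterwards. A hypothetical invocation sketch (the playbook name and tag are placeholders; inside a test, context would be the nose context mentioned in the docstring):

# Hypothetical usage: run a fixture playbook that lives next to the caller
# and fail loudly if Ansible reported a non-zero return code.
result = run_playbooks('setup_devices.yaml', tags=['provision'], context=self)
assert result.rc == 0, result.failed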