Example #1
0
    def test_network_3_existing_vxlan_nodes_1_requested_vxlan_nodes(
            self,
            network_state='tests/bigip_test_vxlan_3_records.json',
            cloud_state='tests/kubernetes_openshift_1_node.json'):
        """Verify an FDB update is pushed when BIG-IP and cloud differ.

        The BIG-IP starts with three VXLAN records while the cloud reports
        a single node, so applying the config must rewrite the records.
        """
        # Load the simulated BIG-IP and cloud states.
        self.read_test_vectors(network_state=network_state,
                               cloud_state=cloud_state)

        # Build the network config from the cloud data and apply it.
        network_cfg = ctlr.create_network_config(self.cloud_data)
        apply_network_fdb_config(self.mgr.mgmt_root(), network_cfg['fdb'])

        # Expect one load for the initial state query and a second one
        # for the update triggered by the difference.
        load_mock = self.mgr.mgmt_root().tm.net.fdb.tunnels.tunnel.load
        self.assertEqual(load_mock.call_count, 2)

        # The tunnel records must now match the expected FDB content.
        self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
Example #2
0
    def test_network_1_existing_vxlan_nodes_1_requested_vxlan_nodes(
            self,
            network_state='tests/bigip_test_vxlan_1_record.json',
            cloud_state='tests/kubernetes_openshift_1_node.json'):
        """Verify no write happens when BIG-IP already matches the cloud.

        Both the BIG-IP and the cloud describe the same single node, so
        applying the config must only read the initial state.
        """
        # Load the simulated BIG-IP and cloud states.
        self.read_test_vectors(network_state=network_state,
                               cloud_state=cloud_state)

        # Build the network config from the cloud data and apply it.
        network_cfg = ctlr.create_network_config(self.cloud_data)
        apply_network_fdb_config(self.mgr.mgmt_root(), network_cfg['fdb'])

        # Exactly one load (the initial state query); no update should
        # be written when nothing has changed.
        load_mock = self.mgr.mgmt_root().tm.net.fdb.tunnels.tunnel.load
        self.assertEqual(load_mock.call_count, 1)

        # The tunnel records must still match the expected FDB content.
        self.assertEqual(self.compute_fdb_records(), self.vxlan_tunnel.records)
Example #3
0
    def test_network_bad_partition_name(
            self,
            network_state='tests/bigip_test_vxlan_3_records.json',
            cloud_state='tests/kubernetes_openshift_1_node.json'):
        """Verify the BIG-IP is untouched when the vxlan name is malformed."""
        self.read_test_vectors(cloud_state=cloud_state,
                               network_state=network_state)

        # Each of these vxlan-name values has a bad partition format;
        # none of them may result in a tunnel being configured.
        for bad_name in ('/bad/partition/name/idf/',
                         'bad/partition/name',
                         ''):
            self.cloud_data['openshift-sdn']['vxlan-name'] = bad_name
            cfg = ctlr.create_network_config(self.cloud_data)
            apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
            self.assertFalse(hasattr(self, 'vxlan_tunnel'))
Example #4
0
    def test_network_bad_vxlan_ip(
            self,
            network_state='tests/bigip_test_vxlan_3_records.json',
            cloud_state='tests/kubernetes_openshift_1_node.json'):
        """Verify the BIG-IP is untouched by badly formatted node IPs."""
        self.read_test_vectors(cloud_state=cloud_state,
                               network_state=network_state)

        # Each of these vxlan-node-ips entries is an invalid address;
        # the original records must survive every attempted apply.
        for bad_ip in ('55', 55, 'myaddr'):
            self.cloud_data['openshift-sdn']['vxlan-node-ips'][0] = bad_ip
            cfg = ctlr.create_network_config(self.cloud_data)
            apply_network_fdb_config(self.mgr.mgmt_root(), cfg['fdb'])
            self.assertEqual(self.network_data, self.vxlan_tunnel.records)
    def _do_reset(self):
        """Main loop of the config-handler thread.

        Waits on the condition variable until a reset is pending (or stop
        is requested), re-parses the config file, applies LTM config to
        each managed BIG-IP and FDB config via the first manager, and
        schedules backoff retries when any part of the apply reports
        incomplete work.
        """
        log.debug('config handler thread start')

        with self._condition:
            # customProfiles is true when we've written out a custom profile.
            # Once we know we've written out a profile, we can call delete
            # if needed.
            customProfiles = False
            while True:
                # NOTE(review): the condition is already held by the
                # enclosing 'with'; this inner acquire/release pair relies
                # on the condition's lock being re-entrant — confirm that
                # the double acquire is intentional.
                self._condition.acquire()
                if not self._pending_reset and not self._stop:
                    # Block until another thread notifies of a reset/stop.
                    self._condition.wait()
                log.debug('config handler woken for reset')

                self._pending_reset = False
                self._condition.release()

                # Shutdown path: cancel any pending backoff timer and exit.
                if self._stop:
                    log.info('stopping config handler')
                    if self._backoff_timer is not None:
                        self.cleanup_backoff()
                    break

                start_time = time.time()

                config = _parse_config(self._config_file)
                # No 'resources' indicates that the controller is not
                # yet ready -- it does not mean to apply an empty config
                if 'resources' not in config:
                    continue
                verify_interval, _ = _handle_global_config(config)
                _handle_openshift_sdn_config(config)
                self.set_interval_timer(verify_interval)

                cfg_network = create_network_config(config)
                # Count of items that failed to apply; nonzero triggers
                # the backoff/retry path below.
                incomplete = 0

                for mgr in self._managers:
                    partition = mgr.get_partition()
                    cfg_ltm = create_ltm_config(partition, config)
                    try:
                        # Manually create custom profiles;
                        # CCCL doesn't yet do this
                        if 'customProfiles' in cfg_ltm:
                            tmp = 0
                            customProfiles, tmp = _create_custom_profiles(
                                mgr.mgmt_root(), partition,
                                cfg_ltm['customProfiles'])
                            incomplete += tmp

                        # Apply the BIG-IP config after creating profiles
                        # and before deleting profiles
                        incomplete += mgr._apply_ltm_config(cfg_ltm)

                        # Manually delete custom profiles (if needed)
                        if customProfiles:
                            _delete_unused_ssl_profiles(
                                mgr.mgmt_root(), partition, cfg_ltm)

                    except F5CcclError as e:
                        # We created an invalid configuration, raise the
                        # exception and fail
                        log.error("CCCL Error: %s", e.msg)
                        raise e

                # FDB (VXLAN) config is applied once, via the first manager.
                if 'fdb' in cfg_network:
                    incomplete += apply_network_fdb_config(
                        self._managers[0].mgmt_root(), cfg_network['fdb'])

                if incomplete:
                    # Error occurred, perform retries
                    self.handle_backoff()
                else:
                    # Success: (re)start the verify-interval timer and
                    # reset any backoff state.
                    if (self._interval
                            and self._interval.is_running() is False):
                        self._interval.start()
                    self._backoff_time = 1
                    if self._backoff_timer is not None:
                        self.cleanup_backoff()

                # Optional scale-test instrumentation, enabled via the
                # SCALE_PERF_ENABLE environment variable: log per-service
                # backend counts and totals as JSON.
                perf_enable = os.environ.get('SCALE_PERF_ENABLE')
                if perf_enable:  # pragma: no cover
                    test_data = {}
                    app_count = 0
                    backend_count = 0
                    for service in config['resources']['test'][
                            'virtualServers']:
                        app_count += 1
                        backends = 0
                        for pool in config['resources']['test']['pools']:
                            if pool['name'] == service['name']:
                                backends = len(pool['members'])
                                break
                        test_data[service['name']] = backends
                        backend_count += backends
                    test_data['Total_Services'] = app_count
                    test_data['Total_Backends'] = backend_count
                    test_data['Time'] = time.time()
                    json_data = json.dumps(test_data)
                    log.info('SCALE_PERF: Test data: %s', json_data)

                log.debug('updating tasks finished, took %s seconds',
                          time.time() - start_time)

        if self._interval:
            self._interval.stop()