Code Example #1
    def test_get_default_nic_networkgroups(self):
        cluster = self.env.create_cluster(api=True,
                                          net_provider='neutron',
                                          net_segment_type='gre')
        node = self.env.create_node(api=True)
        node_db = self.env.nodes[0]

        admin_nic = node_db.admin_interface
        other_iface = self.db.query(NodeNICInterface).filter_by(
            node_id=node['id']
        ).filter(
            not_(NodeNICInterface.id == admin_nic.id)
        ).first()

        interfaces = deepcopy(node_db.meta['interfaces'])

        # allocate ip from admin subnet
        admin_ip = str(IPNetwork(NetworkManager.get_admin_network().cidr)[0])
        for interface in interfaces:
            if interface['mac'] == admin_nic.mac:
                # reset admin ip for previous admin interface
                interface['ip'] = None
            elif interface['mac'] == other_iface.mac:
                # set new admin interface
                interface['ip'] = admin_ip

        node_db.meta['interfaces'] = interfaces

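        # emulate the nailgun agent re-reporting the node's meta: the admin IP
        # now lives on other_iface, so the admin interface is expected to move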
        self.app.put(
            reverse('NodeCollectionHandler'),
            json.dumps([{
                        'mac': admin_nic.mac,
                        'meta': node_db.meta,
                        'is_agent': True,
                        'cluster_id': cluster["id"]
                        }]),
            headers=self.default_headers,
            expect_errors=True
        )

        new_main_nic_id = node_db.admin_interface.id
        admin_nets = [n.name for n in self.db.query(
            NodeNICInterface).get(new_main_nic_id).assigned_networks]
        other_nets = [n.name for n in other_iface.assigned_networks]

        nics = NeutronManager.get_default_networks_assignment(node_db)
        def_admin_nic = [n for n in nics if n['id'] == new_main_nic_id]
        def_other_nic = [n for n in nics if n['id'] == other_iface.id]

        self.assertEquals(len(def_admin_nic), 1)
        self.assertEquals(len(def_other_nic), 1)
        self.assertEquals(new_main_nic_id, other_iface.id)
        self.assertEquals(
            set(admin_nets),
            set([n['name'] for n in def_admin_nic[0]['assigned_networks']]))
        self.assertEquals(
            set(other_nets),
            set([n['name'] for n in def_other_nic[0]['assigned_networks']]))
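The assertions above imply, but do not show, the shape of what NeutronManager.get_default_networks_assignment() returns; a minimal sketch of that assumed structure (ids and names are hypothetical):

    example_nics = [
        {
            'id': 7,                      # NIC id (compared to new_main_nic_id)
            'assigned_networks': [
                {'name': 'management'},
                {'name': 'storage'},
            ],
        },
    ]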
Code Example #2
    def get_admin_ip(cls, node):
        """Getting admin ip and assign prefix from admin network."""
        network_manager = NetworkManager()
        admin_ip = network_manager.get_admin_ips_for_interfaces(
            node)[node.admin_interface.name]
        admin_ip = IPNetwork(admin_ip)

        # Assign prefix from admin network
        admin_net = IPNetwork(network_manager.get_admin_network().cidr)
        admin_ip.prefixlen = admin_net.prefixlen

        return str(admin_ip)
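For reference, a small self-contained sketch of the prefix handling above, assuming IPNetwork is netaddr's (the addresses are made up):

    from netaddr import IPNetwork

    admin_ip = IPNetwork('10.20.0.5')       # a bare address defaults to /32
    admin_net = IPNetwork('10.20.0.0/24')   # admin network CIDR

    # adopt the admin network's prefix length while keeping the address
    admin_ip.prefixlen = admin_net.prefixlen
    print(str(admin_ip))                    # -> 10.20.0.5/24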
Code Example #3
    def test_get_default_nic_networkgroups(self):
        cluster = self.env.create_cluster(api=True)
        node = self.env.create_node(api=True)
        node_db = self.env.nodes[0]

        admin_nic = node_db.admin_interface
        other_iface = self.db.query(NodeNICInterface).filter_by(
            node_id=node['id']
        ).filter(
            not_(NodeNICInterface.id == admin_nic.id)
        ).first()

        interfaces = deepcopy(node_db.meta['interfaces'])

        # allocate ip from admin subnet
        admin_ip = str(IPNetwork(NetworkManager.get_admin_network().cidr)[0])
        for interface in interfaces:
            if interface['mac'] == admin_nic.mac:
                # reset admin ip for previous admin interface
                interface['ip'] = None
            elif interface['mac'] == other_iface.mac:
                # set new admin interface
                interface['ip'] = admin_ip

        node_db.meta['interfaces'] = interfaces

        self.app.put(
            reverse('NodeCollectionHandler'),
            json.dumps([{
                        'mac': admin_nic.mac,
                        'meta': node_db.meta,
                        'is_agent': True,
                        'cluster_id': cluster["id"]
                        }]),
            headers=self.default_headers,
            expect_errors=True
        )

        new_main_nic_id = node_db.admin_interface.id
        self.assertEquals(new_main_nic_id, other_iface.id)
        self.assertEquals(
            other_iface.assigned_networks,
            NovaNetworkManager.get_default_nic_networkgroups(
                node_db, other_iface))
        self.assertEquals(
            self.db.query(
                NodeNICInterface).get(admin_nic.id).assigned_networks,
            NovaNetworkManager.get_default_nic_networkgroups(
                node_db, admin_nic))
Code Example #4
File: base.py  Project: andrey-borisov/fuel-web
class Environment(object):

    def __init__(self, app):
        self.db = db()
        self.app = app
        self.tester = TestCase
        self.tester.runTest = lambda a: None
        self.tester = self.tester()
        self.here = os.path.abspath(os.path.dirname(__file__))
        self.fixture_dir = os.path.join(self.here, "..", "fixtures")
        self.default_headers = {
            "Content-Type": "application/json"
        }
        self.releases = []
        self.clusters = []
        self.nodes = []
        self.network_manager = NetworkManager()

    def create(self, **kwargs):
        cluster = self.create_cluster(
            **kwargs.pop('cluster_kwargs', {})
        )
        for node_kwargs in kwargs.pop('nodes_kwargs', []):
            if "cluster_id" not in node_kwargs:
                if isinstance(cluster, dict):
                    node_kwargs["cluster_id"] = cluster["id"]
                else:
                    node_kwargs["cluster_id"] = cluster.id
            node_kwargs.setdefault("api", False)
            self.create_node(
                **node_kwargs
            )
        return cluster

    def create_release(self, api=False, **kwargs):
        version = str(randint(0, 100000000))
        release_data = {
            'name': u"release_name_" + version,
            'version': version,
            'description': u"release_desc" + version,
            'operating_system': 'CentOS',
            'roles': self.get_default_roles(),
            'networks_metadata': self.get_default_networks_metadata(),
            'attributes_metadata': self.get_default_attributes_metadata(),
            'volumes_metadata': self.get_default_volumes_metadata()
        }
        if kwargs:
            release_data.update(kwargs)
        if api:
            resp = self.app.post(
                reverse('ReleaseCollectionHandler'),
                params=json.dumps(release_data),
                headers=self.default_headers
            )
            self.tester.assertEquals(resp.status, 201)
            release = json.loads(resp.body)
            self.releases.append(
                self.db.query(Release).get(release['id'])
            )
        else:
            release = Release()
            for field, value in release_data.iteritems():
                setattr(release, field, value)
            self.db.add(release)
            self.db.commit()
            self.releases.append(release)
        return release

    def download_release(self, release_id):
        release_data = {
            'license_type': 'rhsm',
            'username': '******',
            'password': '******',
            'release_id': release_id
        }

        resp = self.app.post(
            reverse('RedHatAccountHandler'),
            params=json.dumps(release_data),
            headers=self.default_headers
        )
        self.tester.assertEquals(resp.status, 200)
        download_task = json.loads(resp.body)
        return self.db.query(Task).get(download_task['id'])

    def create_cluster(self, api=True, exclude=None, **kwargs):
        cluster_data = {
            'name': 'cluster-api-' + str(randint(0, 1000000))
        }
        if api:
            cluster_data['release'] = self.create_release(api=False).id
        else:
            cluster_data['release'] = self.create_release(api=False)

        if kwargs:
            cluster_data.update(kwargs)

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del cluster_data[ex]
                except KeyError as err:
                    logging.warning(err)
        if api:
            resp = self.app.post(
                reverse('ClusterCollectionHandler'),
                json.dumps(cluster_data),
                headers=self.default_headers
            )
            self.tester.assertEquals(resp.status, 201)
            cluster = json.loads(resp.body)
            self.clusters.append(
                self.db.query(Cluster).get(cluster['id'])
            )
        else:
            cluster = Cluster()
            for field, value in cluster_data.iteritems():
                setattr(cluster, field, value)
            self.db.add(cluster)
            self.db.commit()
            self.clusters.append(cluster)
        return cluster

    def create_node(
            self, api=False,
            exclude=None, expect_http=201,
            expect_message=None,
            **kwargs):
        metadata = kwargs.get('meta')
        default_metadata = self.default_metadata()
        if metadata:
            default_metadata.update(metadata)

        mac = self._generate_random_mac()
        if default_metadata['interfaces']:
            default_metadata['interfaces'][0]['mac'] = kwargs.get('mac', mac)

        node_data = {
            'mac': mac,
            'roles': ['controller'],
            'status': 'discover',
            'meta': default_metadata
        }
        if kwargs:
            meta = kwargs.pop('meta', None)
            node_data.update(kwargs)
            if meta:
                kwargs['meta'] = meta

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del node_data[ex]
                except KeyError as err:
                    logging.warning(err)
        if api:
            resp = self.app.post(
                reverse('NodeCollectionHandler'),
                json.dumps(node_data),
                headers=self.default_headers,
                expect_errors=True
            )
            self.tester.assertEquals(resp.status, expect_http)
            if expect_message:
                self.tester.assertEquals(resp.body, expect_message)
            if str(expect_http)[0] != "2":
                return None
            self.tester.assertEquals(resp.status, expect_http)
            node = json.loads(resp.body)
            node_db = self.db.query(Node).get(node['id'])
            self._set_interfaces_if_not_set_in_meta(
                node_db.id,
                kwargs.get('meta', None))
            self.nodes.append(node_db)
        else:
            node = Node()
            node.timestamp = datetime.now()
            if 'cluster_id' in node_data:
                cluster_id = node_data.pop('cluster_id')
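                # for/else: if no tracked cluster matches this id, fall back
                # to assigning the raw cluster_id on the node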
                for cluster in self.clusters:
                    if cluster.id == cluster_id:
                        node.cluster = cluster
                        break
                else:
                    node.cluster_id = cluster_id
            for key, value in node_data.iteritems():
                setattr(node, key, value)
            node.attributes = self.create_attributes()
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            self.db.add(node)
            self.db.commit()
            if node.meta and node.meta.get('interfaces'):
                self._create_interfaces_from_meta(node)

            self.nodes.append(node)

        return node

    def create_rh_account(self, **kwargs):
        username = kwargs.pop("username", "rh_username")
        password = kwargs.pop("password", "rh_password")
        license_type = kwargs.pop("license_type", "rhsm")
        rh_account = RedHatAccount(
            username=username,
            password=password,
            license_type=license_type,
            **kwargs
        )
        self.db.add(rh_account)
        self.db.commit()
        return rh_account

    def create_task(self, **kwargs):
        task = Task(**kwargs)
        self.db.add(task)
        self.db.commit()
        return task

    def create_attributes(self):
        return NodeAttributes()

    def create_notification(self, **kwargs):
        notif_data = {
            "topic": "discover",
            "message": "Test message",
            "status": "unread",
            "datetime": datetime.now()
        }
        if kwargs:
            notif_data.update(kwargs)
        notification = Notification()
        notification.cluster_id = notif_data.get("cluster_id")
        for f, v in notif_data.iteritems():
            setattr(notification, f, v)
        self.db.add(notification)
        self.db.commit()
        return notification

    def default_metadata(self):
        item = self.find_item_by_pk_model(
            self.read_fixtures(("sample_environment",)),
            1, 'nailgun.node')
        return item.get('fields').get('meta')

    def _generate_random_mac(self):
        mac = [randint(0x00, 0x7f) for _ in xrange(6)]
        return ':'.join(map(lambda x: "%02x" % x, mac)).upper()

    def generate_interfaces_in_meta(self, amount):
        nics = []
        for i in xrange(amount):
            nics.append(
                {
                    'name': 'eth{0}'.format(i),
                    'mac': self._generate_random_mac(),
                    'current_speed': 100,
                    'max_speed': 1000
                }
            )
        self.set_admin_ip_for_for_single_interface(nics)
        return {'interfaces': nics}

    def _set_interfaces_if_not_set_in_meta(self, node_id, meta):
        if not meta or 'interfaces' not in meta:
            self._add_interfaces_to_node(node_id)

    def _create_interfaces_from_meta(self, node):
        # Create interfaces from meta
        for interface in node.meta['interfaces']:
            interface = NodeNICInterface(
                mac=interface.get('mac'),
                name=interface.get('name'),
                ip_addr=interface.get('ip'),
                netmask=interface.get('netmask'))

            self.db.add(interface)
            node.interfaces.append(interface)

        # If node in a cluster then add
        # allowed_networks for all interfaces
        # and assigned_networks for first interface
        if node.cluster_id:
            ng_ids = [ng.id for ng in
                      self.network_manager.get_all_cluster_networkgroups(node)]
            allowed_networks = list(self.db.query(NetworkGroup).filter(
                NetworkGroup.id.in_(ng_ids)))

            for interface in node.interfaces:
                interface.allowed_networks = allowed_networks

            node.interfaces[0].assigned_networks = allowed_networks

        self.db.commit()
        # At least one interface should have
        # same ip as mac in meta
        if node.interfaces and not \
           filter(lambda i: node.mac == i.mac, node.interfaces):

            node.interfaces[0].mac = node.mac
            self.db.commit()

    def _add_interfaces_to_node(self, node_id, count=1):
        interfaces = []
        node = self.db.query(Node).get(node_id)
        ng_ids = [ng.id for ng in
                  self.network_manager.get_all_cluster_networkgroups(node)]
        allowed_networks = list(self.db.query(NetworkGroup).filter(
            NetworkGroup.id.in_(ng_ids)))

        for i in xrange(count):
            nic_dict = {
                'node_id': node_id,
                'name': 'eth{0}'.format(i),
                'mac': self._generate_random_mac(),
                'current_speed': 100,
                'max_speed': 1000,
                'allowed_networks': allowed_networks,
                'assigned_networks': allowed_networks
            }

            interface = NodeNICInterface()
            for k, v in nic_dict.iteritems():
                setattr(interface, k, v)

            self.db.add(interface)
            self.db.commit()

            interfaces.append(interface)

        return interfaces

    def set_admin_ip_for_for_single_interface(self, interfaces):
        """Set admin ip for single interface if it not setted yet."""
        ips = [interface.get('ip') for interface in interfaces]
        admin_ips = [
            ip for ip in ips
            if self.network_manager.is_ip_belongs_to_admin_subnet(ip)]

        if not admin_ips:
            admin_cidr = self.network_manager.get_admin_network().cidr
            interfaces[0]['ip'] = str(IPNetwork(admin_cidr).ip)

    def set_interfaces_in_meta(self, meta, interfaces):
        """Set interfaces in metadata."""
        meta['interfaces'] = interfaces
        self.set_admin_ip_for_for_single_interface(meta['interfaces'])
        return meta['interfaces']

    def generate_ui_networks(self, cluster_id):
        start_id = self.db.query(NetworkGroup.id).order_by(
            NetworkGroup.id
        ).first()
        start_id = 0 if not start_id else start_id[-1] + 1
        net_names = (
            "floating",
            "public",
            "management",
            "storage",
            "fixed"
        )
        net_cidrs = (
            "172.16.0.0/24",
            "172.16.1.0/24",
            "192.168.0.0/24",
            "192.168.1.0/24",
            "10.0.0.0/24"
        )
        nets = {'networks': [{
            "network_size": 256,
            "name": nd[0],
            "amount": 1,
            "cluster_id": cluster_id,
            "vlan_start": 100 + i,
            "cidr": nd[1],
            "id": start_id + i
        } for i, nd in enumerate(zip(net_names, net_cidrs))]}

        public = filter(
            lambda net: net['name'] == 'public',
            nets['networks'])[0]
        public['netmask'] = '255.255.255.0'

        return nets

    def get_default_roles(self):
        return ['controller', 'compute', 'cinder', 'ceph-osd']

    def get_default_volumes_metadata(self):
        return self.read_fixtures(
            ('openstack',))[0]['fields']['volumes_metadata']

    def get_default_networks_metadata(self):
        return {
            "nova_network": {
                "networks": [
                    {
                        "name": "floating",
                        "cidr": "172.16.0.0/24",
                        "netmask": "255.255.255.0",
                        "gateway": "172.16.0.1",
                        "ip_range": ["172.16.0.128", "172.16.0.254"],
                        "vlan_start": 100,
                        "network_size": 256,
                        "assign_vip": False
                    },
                    {
                        "name": "public",
                        "cidr": "172.16.0.0/24",
                        "netmask": "255.255.255.0",
                        "gateway": "172.16.0.1",
                        "ip_range": ["172.16.0.2", "172.16.0.127"],
                        "vlan_start": 100,
                        "assign_vip": True
                    },
                    {
                        "name": "management",
                        "cidr": "192.168.0.0/24",
                        "netmask": "255.255.255.0",
                        "gateway": "192.168.0.1",
                        "ip_range": ["192.168.0.1", "192.168.0.254"],
                        "vlan_start": 101,
                        "assign_vip": True
                    },
                    {
                        "name": "storage",
                        "cidr": "192.168.1.0/24",
                        "netmask": "255.255.255.0",
                        "gateway": "192.168.1.1",
                        "ip_range": ["192.168.1.1", "192.168.1.254"],
                        "vlan_start": 102,
                        "assign_vip": False
                    },
                    {
                        "name": "fixed",
                        "cidr": "10.0.0.0/16",
                        "netmask": "255.255.0.0",
                        "gateway": "10.0.0.1",
                        "ip_range": ["10.0.0.2", "10.0.255.254"],
                        "vlan_start": 103,
                        "assign_vip": False
                    }
                ]
            },
            "neutron": {
                "networks": [
                    {
                        "name": "public",
                        "pool": ["172.16.0.0/12"]
                    },
                    {
                        "name": "management",
                        "pool": ["192.168.0.0/16"]
                    },
                    {
                        "name": "storage",
                        "pool": ["192.168.0.0/16"]
                    }
                ],
                "config": {
                    "parameters": {
                        "amqp": {
                            "provider": "rabbitmq",
                            "username": None,
                            "passwd": "",
                            "hosts": "hostname1:5672, hostname2:5672"
                        },
                        "database": {
                            "provider": "mysql",
                            "port": "3306",
                            "database": None,
                            "username": None,
                            "passwd": ""
                        },
                        "keystone": {
                            "admin_user": None,
                            "admin_password": ""
                        },
                        "metadata": {
                            "metadata_proxy_shared_secret": ""
                        }
                    }
                }
            }
        }

    def get_default_attributes_metadata(self):
        return self.read_fixtures(
            ['openstack'])[0]['fields']['attributes_metadata']

    def upload_fixtures(self, fxtr_names):
        for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
            with open(fxtr_path, "r") as fxtr_file:
                upload_fixture(fxtr_file)

    def read_fixtures(self, fxtr_names):
        data = []
        for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
            with open(fxtr_path, "r") as fxtr_file:
                try:
                    data.extend(json.load(fxtr_file))
                except Exception as exc:
                    logging.error(
                        'Error "%s" occurred while loading '
                        'fixture %s' % (exc, fxtr_path)
                    )
        return data

    def fxtr_paths_by_names(self, fxtr_names):
        for fxtr in fxtr_names:
            fxtr_path = os.path.join(
                self.fixture_dir,
                "%s.json" % fxtr
            )

            if not os.path.exists(fxtr_path):
                logging.warning(
                    "Fixture file was not found: %s",
                    fxtr_path
                )
                break
            else:
                logging.debug(
                    "Fixture file is found, yielding path: %s",
                    fxtr_path
                )
                yield fxtr_path

    def find_item_by_pk_model(self, data, pk, model):
        for item in data:
            if item.get('pk') == pk and item.get('model') == model:
                return item

    def launch_deployment(self):
        if self.clusters:
            resp = self.app.put(
                reverse(
                    'ClusterChangesHandler',
                    kwargs={'cluster_id': self.clusters[0].id}),
                headers=self.default_headers)
            self.tester.assertEquals(200, resp.status)
            response = json.loads(resp.body)
            return self.db.query(Task).filter_by(
                uuid=response['uuid']
            ).first()
        else:
            raise NotImplementedError(
                "Nothing to deploy - try creating cluster"
            )

    def launch_verify_networks(self, data=None):
        if self.clusters:
            if data:
                nets = json.dumps(data)
            else:
                resp = self.app.get(
                    reverse(
                        'NovaNetworkConfigurationHandler',
                        kwargs={'cluster_id': self.clusters[0].id}
                    ),
                    headers=self.default_headers
                )
                self.tester.assertEquals(200, resp.status)
                nets = resp.body

            resp = self.app.put(
                reverse(
                    'NovaNetworkConfigurationVerifyHandler',
                    kwargs={'cluster_id': self.clusters[0].id}),
                nets,
                headers=self.default_headers
            )
            self.tester.assertEquals(200, resp.status)
            response = json.loads(resp.body)
            task_uuid = response['uuid']
            return self.db.query(Task).filter_by(uuid=task_uuid).first()
        else:
            raise NotImplementedError(
                "Nothing to verify - try creating cluster"
            )

    def refresh_nodes(self):
        for n in self.nodes[:]:
            try:
                self.db.add(n)
                self.db.refresh(n)
            except Exception:
                self.nodes.remove(n)

    def refresh_clusters(self):
        for n in self.clusters[:]:
            try:
                self.db.refresh(n)
            except Exception:
                self.nodes.remove(n)

    def _wait_task(self, task, timeout, message):
        timer = time.time()
        while task.status == 'running':
            self.db.refresh(task)
            if time.time() - timer > timeout:
                raise Exception(
                    "Task '{0}' seems to be hanged".format(
                        task.name
                    )
                )
            time.sleep(1)
        self.tester.assertEquals(task.progress, 100)
        if isinstance(message, type(re.compile("regexp"))):
            self.tester.assertIsNotNone(re.match(message, task.message))
        elif isinstance(message, str):
            self.tester.assertEquals(task.message, message)

    def wait_ready(self, task, timeout=60, message=None):
        self._wait_task(task, timeout, message)
        self.tester.assertEquals(task.status, 'ready')

    def wait_error(self, task, timeout=60, message=None):
        self._wait_task(task, timeout, message)
        self.tester.assertEquals(task.status, 'error')

    def wait_for_nodes_status(self, nodes, status):
        def check_statuses():
            self.refresh_nodes()

            nodes_with_status = filter(
                lambda x: x.status in status,
                nodes)

            return len(nodes) == len(nodes_with_status)

        self.wait_for_true(
            check_statuses,
            error_message='Something wrong with the statuses')

    def wait_for_true(self, check, args=[], kwargs={},
                      timeout=60, error_message='Timeout error'):

        start_time = time.time()

        while True:
            result = check(*args, **kwargs)
            if result:
                return result
            if time.time() - start_time > timeout:
                raise TimeoutError(error_message)
            time.sleep(0.1)

    def _api_get(self, method, instance_id, expect_errors=False):
        return self.app.get(
            reverse(method,
                    kwargs=instance_id),
            headers=self.default_headers,
            expect_errors=expect_errors)

    def _api_put(self, method, instance_id, data, expect_errors=False):
        return self.app.put(
            reverse(method,
                    kwargs=instance_id),
            json.dumps(data),
            headers=self.default_headers,
            expect_errors=expect_errors)

    def nova_networks_get(self, cluster_id, expect_errors=False):
        return self._api_get('NovaNetworkConfigurationHandler',
                             {'cluster_id': cluster_id},
                             expect_errors)

    def nova_networks_put(self, cluster_id, networks, expect_errors=False):
        return self._api_put('NovaNetworkConfigurationHandler',
                             {'cluster_id': cluster_id},
                             networks,
                             expect_errors)

    def neutron_networks_get(self, cluster_id, expect_errors=False):
        return self._api_get('NeutronNetworkConfigurationHandler',
                             {'cluster_id': cluster_id},
                             expect_errors)

    def neutron_networks_put(self, cluster_id, networks, expect_errors=False):
        return self._api_put('NeutronNetworkConfigurationHandler',
                             {'cluster_id': cluster_id},
                             networks,
                             expect_errors)

    def cluster_changes_put(self, cluster_id, expect_errors=False):
        return self._api_put('ClusterChangesHandler',
                             {'cluster_id': cluster_id},
                             [],
                             expect_errors)

    def node_nics_get(self, node_id, expect_errors=False):
        return self._api_get('NodeNICsHandler',
                             {'node_id': node_id},
                             expect_errors)

    def node_collection_nics_put(self, node_id, interfaces,
                                 expect_errors=False):
        return self._api_put('NodeCollectionNICsHandler',
                             {'node_id': node_id},
                             interfaces,
                             expect_errors)
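A hypothetical usage sketch of the Environment helper above, assuming self.env holds an instance wired to a test WSGI app (the kwargs shown are illustrative, not exhaustive):

    # create a cluster with two nodes, start deployment and wait for the task
    cluster = self.env.create(
        nodes_kwargs=[
            {'roles': ['controller']},
            {'roles': ['compute']},
        ])
    task = self.env.launch_deployment()
    self.env.wait_ready(task, timeout=60)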
Code Example #5
class Environment(object):
    def __init__(self, app):
        self.db = db()
        self.app = app
        self.tester = TestCase
        self.tester.runTest = lambda a: None
        self.tester = self.tester()
        self.here = os.path.abspath(os.path.dirname(__file__))
        self.fixture_dir = os.path.join(self.here, "..", "fixtures")
        self.default_headers = {"Content-Type": "application/json"}
        self.releases = []
        self.clusters = []
        self.nodes = []
        self.network_manager = NetworkManager()

    def create(self, **kwargs):
        cluster = self.create_cluster(**kwargs.pop('cluster_kwargs', {}))
        for node_kwargs in kwargs.pop('nodes_kwargs', []):
            if "cluster_id" not in node_kwargs:
                if isinstance(cluster, dict):
                    node_kwargs["cluster_id"] = cluster["id"]
                else:
                    node_kwargs["cluster_id"] = cluster.id
            node_kwargs.setdefault("api", False)
            self.create_node(**node_kwargs)
        return cluster

    def create_release(self, api=False, **kwargs):
        version = str(randint(0, 100000000))
        release_data = {
            'name': u"release_name_" + version,
            'version': version,
            'description': u"release_desc" + version,
            'operating_system': 'CentOS',
            'roles': self.get_default_roles(),
            'networks_metadata': self.get_default_networks_metadata(),
            'attributes_metadata': self.get_default_attributes_metadata(),
            'volumes_metadata': self.get_default_volumes_metadata()
        }
        if kwargs:
            release_data.update(kwargs)
        if api:
            resp = self.app.post(reverse('ReleaseCollectionHandler'),
                                 params=json.dumps(release_data),
                                 headers=self.default_headers)
            self.tester.assertEquals(resp.status, 201)
            release = json.loads(resp.body)
            self.releases.append(self.db.query(Release).get(release['id']))
        else:
            release = Release()
            for field, value in release_data.iteritems():
                setattr(release, field, value)
            self.db.add(release)
            self.db.commit()
            self.releases.append(release)
        return release

    def download_release(self, release_id):
        release_data = {
            'license_type': 'rhsm',
            'username': '******',
            'password': '******',
            'release_id': release_id
        }

        resp = self.app.post(reverse('RedHatAccountHandler'),
                             params=json.dumps(release_data),
                             headers=self.default_headers)
        self.tester.assertEquals(resp.status, 200)
        download_task = json.loads(resp.body)
        return self.db.query(Task).get(download_task['id'])

    def create_cluster(self, api=True, exclude=None, **kwargs):
        cluster_data = {'name': 'cluster-api-' + str(randint(0, 1000000))}
        if api:
            cluster_data['release'] = self.create_release(api=False).id
        else:
            cluster_data['release'] = self.create_release(api=False)

        if kwargs:
            cluster_data.update(kwargs)

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del cluster_data[ex]
                except KeyError as err:
                    logging.warning(err)
        if api:
            resp = self.app.post(reverse('ClusterCollectionHandler'),
                                 json.dumps(cluster_data),
                                 headers=self.default_headers)
            self.tester.assertEquals(resp.status, 201)
            cluster = json.loads(resp.body)
            self.clusters.append(self.db.query(Cluster).get(cluster['id']))
        else:
            cluster = Cluster()
            for field, value in cluster_data.iteritems():
                setattr(cluster, field, value)
            self.db.add(cluster)
            self.db.commit()
            self.clusters.append(cluster)
        return cluster

    def create_node(self,
                    api=False,
                    exclude=None,
                    expect_http=201,
                    expect_message=None,
                    **kwargs):
        metadata = kwargs.get('meta')
        default_metadata = self.default_metadata()
        if metadata:
            default_metadata.update(metadata)

        mac = self._generate_random_mac()
        if default_metadata['interfaces']:
            default_metadata['interfaces'][0]['mac'] = kwargs.get('mac', mac)

        node_data = {
            'mac': mac,
            'roles': ['controller'],
            'status': 'discover',
            'meta': default_metadata
        }
        if kwargs:
            meta = kwargs.pop('meta', None)
            node_data.update(kwargs)
            if meta:
                kwargs['meta'] = meta

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del node_data[ex]
                except KeyError as err:
                    logging.warning(err)
        if api:
            resp = self.app.post(reverse('NodeCollectionHandler'),
                                 json.dumps(node_data),
                                 headers=self.default_headers,
                                 expect_errors=True)
            self.tester.assertEquals(resp.status, expect_http)
            if expect_message:
                self.tester.assertEquals(resp.body, expect_message)
            if str(expect_http)[0] != "2":
                return None
            self.tester.assertEquals(resp.status, expect_http)
            node = json.loads(resp.body)
            node_db = self.db.query(Node).get(node['id'])
            self._set_interfaces_if_not_set_in_meta(node_db.id,
                                                    kwargs.get('meta', None))
            self.nodes.append(node_db)
        else:
            node = Node()
            node.timestamp = datetime.now()
            if 'cluster_id' in node_data:
                cluster_id = node_data.pop('cluster_id')
                for cluster in self.clusters:
                    if cluster.id == cluster_id:
                        node.cluster = cluster
                        break
                else:
                    node.cluster_id = cluster_id
            for key, value in node_data.iteritems():
                setattr(node, key, value)
            node.attributes = self.create_attributes()
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            self.db.add(node)
            self.db.commit()
            if node.meta and node.meta.get('interfaces'):
                self._create_interfaces_from_meta(node)

            self.nodes.append(node)

        return node

    def create_rh_account(self, **kwargs):
        username = kwargs.pop("username", "rh_username")
        password = kwargs.pop("password", "rh_password")
        license_type = kwargs.pop("license_type", "rhsm")
        rh_account = RedHatAccount(username=username,
                                   password=password,
                                   license_type=license_type,
                                   **kwargs)
        self.db.add(rh_account)
        self.db.commit()
        return rh_account

    def create_task(self, **kwargs):
        task = Task(**kwargs)
        self.db.add(task)
        self.db.commit()
        return task

    def create_attributes(self):
        return NodeAttributes()

    def create_notification(self, **kwargs):
        notif_data = {
            "topic": "discover",
            "message": "Test message",
            "status": "unread",
            "datetime": datetime.now()
        }
        if kwargs:
            notif_data.update(kwargs)
        notification = Notification()
        notification.cluster_id = notif_data.get("cluster_id")
        for f, v in notif_data.iteritems():
            setattr(notification, f, v)
        self.db.add(notification)
        self.db.commit()
        return notification

    def default_metadata(self):
        item = self.find_item_by_pk_model(
            self.read_fixtures(("sample_environment", )), 1, 'nailgun.node')
        return item.get('fields').get('meta')

    def _generate_random_mac(self):
        mac = [randint(0x00, 0x7f) for _ in xrange(6)]
        return ':'.join(map(lambda x: "%02x" % x, mac)).upper()

    def generate_interfaces_in_meta(self, amount):
        nics = []
        for i in xrange(amount):
            nics.append({
                'name': 'eth{0}'.format(i),
                'mac': self._generate_random_mac(),
                'current_speed': 100,
                'max_speed': 1000
            })
        self.set_admin_ip_for_for_single_interface(nics)
        return {'interfaces': nics}

    def _set_interfaces_if_not_set_in_meta(self, node_id, meta):
        if not meta or 'interfaces' not in meta:
            self._add_interfaces_to_node(node_id)

    def _create_interfaces_from_meta(self, node):
        # Create interfaces from meta
        for interface in node.meta['interfaces']:
            interface = NodeNICInterface(mac=interface.get('mac'),
                                         name=interface.get('name'),
                                         ip_addr=interface.get('ip'),
                                         netmask=interface.get('netmask'))

            self.db.add(interface)
            node.interfaces.append(interface)

        # If node in a cluster then add
        # allowed_networks for all interfaces
        # and assigned_networks for first interface
        if node.cluster_id:
            ng_ids = [
                ng.id for ng in
                self.network_manager.get_all_cluster_networkgroups(node)
            ]
            allowed_networks = list(
                self.db.query(NetworkGroup).filter(
                    NetworkGroup.id.in_(ng_ids)))

            for interface in node.interfaces:
                interface.allowed_networks = allowed_networks

            node.interfaces[0].assigned_networks = allowed_networks

        self.db.commit()
        # At least one interface should have
        # same ip as mac in meta
        if node.interfaces and not \
           filter(lambda i: node.mac == i.mac, node.interfaces):

            node.interfaces[0].mac = node.mac
            self.db.commit()

    def _add_interfaces_to_node(self, node_id, count=1):
        interfaces = []
        node = self.db.query(Node).get(node_id)
        ng_ids = [
            ng.id
            for ng in self.network_manager.get_all_cluster_networkgroups(node)
        ]
        allowed_networks = list(
            self.db.query(NetworkGroup).filter(NetworkGroup.id.in_(ng_ids)))

        for i in xrange(count):
            nic_dict = {
                'node_id': node_id,
                'name': 'eth{0}'.format(i),
                'mac': self._generate_random_mac(),
                'current_speed': 100,
                'max_speed': 1000,
                'allowed_networks': allowed_networks,
                'assigned_networks': allowed_networks
            }

            interface = NodeNICInterface()
            for k, v in nic_dict.iteritems():
                setattr(interface, k, v)

            self.db.add(interface)
            self.db.commit()

            interfaces.append(interface)

        return interfaces

    def set_admin_ip_for_for_single_interface(self, interfaces):
        """Set admin ip for single interface if it not setted yet."""
        ips = [interface.get('ip') for interface in interfaces]
        admin_ips = [
            ip for ip in ips
            if self.network_manager.is_ip_belongs_to_admin_subnet(ip)
        ]

        if not admin_ips:
            admin_cidr = self.network_manager.get_admin_network().cidr
            interfaces[0]['ip'] = str(IPNetwork(admin_cidr).ip)

    def set_interfaces_in_meta(self, meta, interfaces):
        """Set interfaces in metadata."""
        meta['interfaces'] = interfaces
        self.set_admin_ip_for_for_single_interface(meta['interfaces'])
        return meta['interfaces']

    def generate_ui_networks(self, cluster_id):
        start_id = self.db.query(NetworkGroup.id).order_by(
            NetworkGroup.id).first()
        start_id = 0 if not start_id else start_id[-1] + 1
        net_names = ("floating", "public", "management", "storage", "fixed")
        net_cidrs = ("172.16.0.0/24", "172.16.1.0/24", "192.168.0.0/24",
                     "192.168.1.0/24", "10.0.0.0/24")
        nets = {
            'networks': [{
                "network_size": 256,
                "name": nd[0],
                "amount": 1,
                "cluster_id": cluster_id,
                "vlan_start": 100 + i,
                "cidr": nd[1],
                "id": start_id + i
            } for i, nd in enumerate(zip(net_names, net_cidrs))]
        }

        public = filter(lambda net: net['name'] == 'public',
                        nets['networks'])[0]
        public['netmask'] = '255.255.255.0'

        return nets

    def get_default_roles(self):
        return ['controller', 'compute', 'cinder', 'ceph-osd']

    def get_default_volumes_metadata(self):
        return self.read_fixtures(
            ('openstack', ))[0]['fields']['volumes_metadata']

    def get_default_networks_metadata(self):
        return {
            "nova_network": {
                "networks": [{
                    "name": "floating",
                    "cidr": "172.16.0.0/24",
                    "netmask": "255.255.255.0",
                    "gateway": "172.16.0.1",
                    "ip_range": ["172.16.0.128", "172.16.0.254"],
                    "vlan_start": 100,
                    "network_size": 256,
                    "assign_vip": False
                }, {
                    "name": "public",
                    "cidr": "172.16.0.0/24",
                    "netmask": "255.255.255.0",
                    "gateway": "172.16.0.1",
                    "ip_range": ["172.16.0.2", "172.16.0.127"],
                    "vlan_start": 100,
                    "assign_vip": True
                }, {
                    "name": "management",
                    "cidr": "192.168.0.0/24",
                    "netmask": "255.255.255.0",
                    "gateway": "192.168.0.1",
                    "ip_range": ["192.168.0.1", "192.168.0.254"],
                    "vlan_start": 101,
                    "assign_vip": True
                }, {
                    "name": "storage",
                    "cidr": "192.168.1.0/24",
                    "netmask": "255.255.255.0",
                    "gateway": "192.168.1.1",
                    "ip_range": ["192.168.1.1", "192.168.1.254"],
                    "vlan_start": 102,
                    "assign_vip": False
                }, {
                    "name": "fixed",
                    "cidr": "10.0.0.0/16",
                    "netmask": "255.255.0.0",
                    "gateway": "10.0.0.1",
                    "ip_range": ["10.0.0.2", "10.0.255.254"],
                    "vlan_start": 103,
                    "assign_vip": False
                }]
            },
            "neutron": {
                "networks": [{
                    "name": "public",
                    "pool": ["172.16.0.0/12"]
                }, {
                    "name": "management",
                    "pool": ["192.168.0.0/16"]
                }, {
                    "name": "storage",
                    "pool": ["192.168.0.0/16"]
                }],
                "config": {
                    "parameters": {
                        "amqp": {
                            "provider": "rabbitmq",
                            "username": None,
                            "passwd": "",
                            "hosts": "hostname1:5672, hostname2:5672"
                        },
                        "database": {
                            "provider": "mysql",
                            "port": "3306",
                            "database": None,
                            "username": None,
                            "passwd": ""
                        },
                        "keystone": {
                            "admin_user": None,
                            "admin_password": ""
                        },
                        "metadata": {
                            "metadata_proxy_shared_secret": ""
                        }
                    }
                }
            }
        }

    def get_default_attributes_metadata(self):
        return self.read_fixtures(['openstack'
                                   ])[0]['fields']['attributes_metadata']

    def upload_fixtures(self, fxtr_names):
        for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
            with open(fxtr_path, "r") as fxtr_file:
                upload_fixture(fxtr_file)

    def read_fixtures(self, fxtr_names):
        data = []
        for fxtr_path in self.fxtr_paths_by_names(fxtr_names):
            with open(fxtr_path, "r") as fxtr_file:
                try:
                    data.extend(json.load(fxtr_file))
                except Exception as exc:
                    logging.error('Error "%s" occurred while loading '
                                  'fixture %s' % (exc, fxtr_path))
        return data

    def fxtr_paths_by_names(self, fxtr_names):
        for fxtr in fxtr_names:
            fxtr_path = os.path.join(self.fixture_dir, "%s.json" % fxtr)

            if not os.path.exists(fxtr_path):
                logging.warning("Fixture file was not found: %s", fxtr_path)
                break
            else:
                logging.debug("Fixture file is found, yielding path: %s",
                              fxtr_path)
                yield fxtr_path

    def find_item_by_pk_model(self, data, pk, model):
        for item in data:
            if item.get('pk') == pk and item.get('model') == model:
                return item

    def launch_deployment(self):
        if self.clusters:
            resp = self.app.put(reverse(
                'ClusterChangesHandler',
                kwargs={'cluster_id': self.clusters[0].id}),
                                headers=self.default_headers)
            self.tester.assertEquals(200, resp.status)
            response = json.loads(resp.body)
            return self.db.query(Task).filter_by(uuid=response['uuid']).first()
        else:
            raise NotImplementedError(
                "Nothing to deploy - try creating cluster")

    def launch_verify_networks(self, data=None):
        if self.clusters:
            if data:
                nets = json.dumps(data)
            else:
                resp = self.app.get(reverse(
                    'NovaNetworkConfigurationHandler',
                    kwargs={'cluster_id': self.clusters[0].id}),
                                    headers=self.default_headers)
                self.tester.assertEquals(200, resp.status)
                nets = resp.body

            resp = self.app.put(reverse(
                'NovaNetworkConfigurationVerifyHandler',
                kwargs={'cluster_id': self.clusters[0].id}),
                                nets,
                                headers=self.default_headers)
            self.tester.assertEquals(200, resp.status)
            response = json.loads(resp.body)
            task_uuid = response['uuid']
            return self.db.query(Task).filter_by(uuid=task_uuid).first()
        else:
            raise NotImplementedError(
                "Nothing to verify - try creating cluster")

    def refresh_nodes(self):
        for n in self.nodes[:]:
            try:
                self.db.add(n)
                self.db.refresh(n)
            except Exception:
                self.nodes.remove(n)

    def refresh_clusters(self):
        for n in self.clusters[:]:
            try:
                self.db.refresh(n)
            except Exception:
                self.nodes.remove(n)

    def _wait_task(self, task, timeout, message):
        timer = time.time()
        while task.status == 'running':
            self.db.refresh(task)
            if time.time() - timer > timeout:
                raise Exception("Task '{0}' seems to be hanged".format(
                    task.name))
            time.sleep(1)
        self.tester.assertEquals(task.progress, 100)
        if isinstance(message, type(re.compile("regexp"))):
            self.tester.assertIsNotNone(re.match(message, task.message))
        elif isinstance(message, str):
            self.tester.assertEquals(task.message, message)

    def wait_ready(self, task, timeout=60, message=None):
        self._wait_task(task, timeout, message)
        self.tester.assertEquals(task.status, 'ready')

    def wait_error(self, task, timeout=60, message=None):
        self._wait_task(task, timeout, message)
        self.tester.assertEquals(task.status, 'error')

    def wait_for_nodes_status(self, nodes, status):
        def check_statuses():
            self.refresh_nodes()

            nodes_with_status = filter(lambda x: x.status in status, nodes)

            return len(nodes) == len(nodes_with_status)

        self.wait_for_true(check_statuses,
                           error_message='Something wrong with the statuses')

    def wait_for_true(self,
                      check,
                      args=[],
                      kwargs={},
                      timeout=60,
                      error_message='Timeout error'):

        start_time = time.time()

        while True:
            result = check(*args, **kwargs)
            if result:
                return result
            if time.time() - start_time > timeout:
                raise TimeoutError(error_message)
            time.sleep(0.1)

    def _api_get(self, method, instance_id, expect_errors=False):
        return self.app.get(reverse(method, kwargs=instance_id),
                            headers=self.default_headers,
                            expect_errors=expect_errors)

    def _api_put(self, method, instance_id, data, expect_errors=False):
        return self.app.put(reverse(method, kwargs=instance_id),
                            json.dumps(data),
                            headers=self.default_headers,
                            expect_errors=expect_errors)

    def nova_networks_get(self, cluster_id, expect_errors=False):
        return self._api_get('NovaNetworkConfigurationHandler',
                             {'cluster_id': cluster_id}, expect_errors)

    def nova_networks_put(self, cluster_id, networks, expect_errors=False):
        return self._api_put('NovaNetworkConfigurationHandler',
                             {'cluster_id': cluster_id}, networks,
                             expect_errors)

    def neutron_networks_get(self, cluster_id, expect_errors=False):
        return self._api_get('NeutronNetworkConfigurationHandler',
                             {'cluster_id': cluster_id}, expect_errors)

    def neutron_networks_put(self, cluster_id, networks, expect_errors=False):
        return self._api_put('NeutronNetworkConfigurationHandler',
                             {'cluster_id': cluster_id}, networks,
                             expect_errors)

    def cluster_changes_put(self, cluster_id, expect_errors=False):
        return self._api_put('ClusterChangesHandler',
                             {'cluster_id': cluster_id}, [], expect_errors)

    def node_nics_get(self, node_id, expect_errors=False):
        return self._api_get('NodeNICsHandler', {'node_id': node_id},
                             expect_errors)

    def node_collection_nics_put(self,
                                 node_id,
                                 interfaces,
                                 expect_errors=False):
        return self._api_put('NodeCollectionNICsHandler', {'node_id': node_id},
                             interfaces, expect_errors)
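The wait_for_true() helper above is a generic poll-until-true loop; a minimal standalone sketch of that pattern using only the standard library (names and the error type are illustrative):

    import time

    def wait_for_true(check, timeout=60, interval=0.1):
        start = time.time()
        while True:
            result = check()
            if result:
                return result
            if time.time() - start > timeout:
                raise RuntimeError('Timeout error')
            time.sleep(interval)

    # usage: poll until a (fake) condition becomes true on the third check
    state = {'calls': 0}

    def ready():
        state['calls'] += 1
        return state['calls'] >= 3

    print(wait_for_true(ready, timeout=5))  # -> True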