Exemple #1
0
    def update(cls, cluster, network_configuration):
        """Apply a network-configuration dict to *cluster*.

        'net_manager' is copied onto the cluster directly; each entry in
        'networks' updates the matching NetworkGroup row, with the
        special key "ip_ranges" replacing the group's IPAddrRange rows
        wholesale.
        """
        from nailgun.network.manager import NetworkManager
        network_manager = NetworkManager()
        if 'net_manager' in network_configuration:
            setattr(cluster, 'net_manager',
                    network_configuration['net_manager'])

        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = orm().query(NetworkGroup).get(ng['id'])

                # Python 2 dict iteration; every key except "ip_ranges"
                # is copied verbatim onto the NetworkGroup row.
                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        # deleting old ip ranges
                        # (py2 map() is eager, so deletes execute here)
                        map(
                            orm().delete,
                            orm().query(IPAddrRange).filter_by(
                                network_group_id=ng['id']))
                        for r in value:
                            # r is a (first, last) pair of addresses
                            new_ip_range = IPAddrRange(
                                first=r[0],
                                last=r[1],
                                network_group_id=ng['id'])
                            orm().add(new_ip_range)
                            # NOTE(review): commits once per range;
                            # a single commit would presumably suffice.
                            orm().commit()
                    else:
                        setattr(ng_db, key, value)

                network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')
Exemple #2
0
    def get_default(self, node):
        """Build the default NIC description list for *node*.

        Each entry carries the interface's basic attributes plus the
        network groups assigned/allowed on it by default.
        """
        network_manager = NetworkManager()
        result = []
        for iface in node.interfaces:
            entry = {
                "id": iface.id,
                "name": iface.name,
                "mac": iface.mac,
                "max_speed": iface.max_speed,
                "current_speed": iface.current_speed
            }

            assigned = network_manager.get_default_nic_networkgroups(
                node.id, iface.id)
            for group_id in assigned:
                group = db().query(NetworkGroup).get(group_id)
                entry.setdefault("assigned_networks", []).append(
                    {"id": group_id, "name": group.name})

            allowed = network_manager.get_allowed_nic_networkgroups(
                node.id, iface.id)
            for group_id in allowed:
                group = db().query(NetworkGroup).get(group_id)
                entry.setdefault("allowed_networks", []).append(
                    {"id": group_id, "name": group.name})

            result.append(entry)
        return result
Exemple #3
0
    def update(cls, cluster, network_configuration):
        """Apply a network-configuration dict to *cluster*.

        'net_manager' is copied onto the cluster; for each network group,
        "ip_ranges" is delegated to __set_ip_ranges while other keys are
        set on the row, with non-public/floating cidr changes propagated
        to the group's ranges first.
        """
        from nailgun.network.manager import NetworkManager
        network_manager = NetworkManager()
        if 'net_manager' in network_configuration:
            setattr(
                cluster,
                'net_manager',
                network_configuration['net_manager'])

        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = db().query(NetworkGroup).get(ng['id'])

                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        cls.__set_ip_ranges(ng['id'], value)
                    else:
                        # A new CIDR implies recomputed ranges, except for
                        # 'public'/'floating' whose ranges are user-managed.
                        if key == 'cidr' and \
                                not ng['name'] in ('public', 'floating'):
                            network_manager.update_ranges_from_cidr(
                                ng_db, value)

                        setattr(ng_db, key, value)

                network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')
Exemple #4
0
    def run(self):
        """Simulate a deletion task, then restore nodes that should
        reappear as freshly discovered.
        """
        super(FakeDeletionThread, self).run()
        receiver = NailgunReceiver
        kwargs = {
            'task_uuid': self.task_uuid,
            'nodes': self.data['args']['nodes'],
            'status': 'ready'
        }
        nodes_to_restore = self.data['args'].get('nodes_to_restore', [])
        resp_method = getattr(receiver, self.respond_to)
        resp_method(**kwargs)

        for node_data in nodes_to_restore:
            node = Node(**node_data)

            # An offline node was just deleted from the db and cannot be
            # recreated with 'discover' status.
            if not node.online:
                continue

            node.status = 'discover'
            db().add(node)
            db().commit()
            node.attributes = NodeAttributes(node_id=node.id)
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            NetworkManager.update_interfaces_info(node)
            db().commit()

            # Hardware summary for the user-facing notification.
            ram = round(node.meta.get('ram') or 0, 1)
            cores = node.meta.get('cores') or 'unknown'
            notifier.notify("discover",
                            "New node with %s CPU core(s) "
                            "and %s GB memory is discovered" %
                            (cores, ram), node_id=node.id)
Exemple #5
0
    def PUT(self, cluster_id):
        """Update cluster attributes.

        The special "nodes" key replaces the cluster's node membership
        and re-wires the affected nodes' network assignments; any other
        key is set on the cluster directly.
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        data = self.checked_data()
        network_manager = NetworkManager()

        for key, value in data.iteritems():
            if key == "nodes":
                # TODO: separate nodes for deletion and addition by set().
                new_nodes = db().query(Node).filter(Node.id.in_(value))
                nodes_to_remove = [
                    n for n in cluster.nodes if n not in new_nodes
                ]
                nodes_to_add = [n for n in new_nodes if n not in cluster.nodes]
                for node in nodes_to_add:
                    if not node.online:
                        raise web.badrequest(
                            "Can not add offline node to cluster")
                # py2 map() is eager: these perform the membership changes.
                map(cluster.nodes.remove, nodes_to_remove)
                map(cluster.nodes.append, nodes_to_add)
                for node in nodes_to_remove:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
                for node in nodes_to_add:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id)
                    network_manager.assign_networks_to_main_interface(node.id)
            else:
                setattr(cluster, key, value)
        db().commit()
        return self.render(cluster)
Exemple #6
0
    def PUT(self, node_id):
        """Update a single node.

        :returns: JSONized Node object.
        :http: * 200 (OK)
               * 400 (invalid node data specified)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager()

        old_cluster_id = node.cluster_id

        # An explicit empty pending_roles list drops the node's
        # pending changes.
        if data.get("pending_roles") == [] and node.cluster:
            node.cluster.clear_pending_changes(node_id=node.id)

        if "cluster_id" in data:
            if data["cluster_id"] is None and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)
                node.roles = node.pending_roles = []
            node.cluster_id = data["cluster_id"]
            if node.cluster_id != old_cluster_id:
                # Re-wire network assignments when the node joins, leaves
                # or moves between clusters.
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if node.cluster_id:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)

        # Role or cluster-membership changes invalidate the volume layout.
        regenerate_volumes = any(
            ('roles' in data
             and set(data['roles']) != set(node.roles), 'pending_roles' in data
             and set(data['pending_roles']) != set(node.pending_roles),
             node.cluster_id != old_cluster_id))

        for key, value in data.iteritems():
            # we don't allow to update id explicitly
            # and cluster_id was already applied above
            if key in ("id", "cluster_id"):
                continue
            setattr(node, key, value)

        if node.status not in ('provisioning',
                               'deploying') and regenerate_volumes:
            try:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (u"Failed to generate volumes "
                       "info for node '{0}': '{1}'").format(
                           node.name or data.get("mac") or data.get("id"),
                           str(exc) or "see logs for details")
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
Exemple #7
0
 def prepare_for_provisioning(cls, nodes):
     """Get nodes ready for provisioning: refresh their FQDNs and
     hand out admin-network IP addresses.
     """
     cls.update_slave_nodes_fqdn(nodes)
     for slave in nodes:
         NetworkManager.assign_admin_ips(slave.id)
Exemple #8
0
    def get_default(self, node):
        """Return the default per-NIC network layout for *node*.

        Each entry describes one interface plus the network groups
        assigned/allowed on it by default.
        """
        network_manager = NetworkManager()
        result = []
        for iface in node.interfaces:
            entry = {
                'id': iface.id,
                'name': iface.name,
                'mac': iface.mac,
                'max_speed': iface.max_speed,
                'current_speed': iface.current_speed
            }

            assigned = network_manager.get_default_nic_networkgroups(
                node, iface)
            for group in assigned:
                entry.setdefault('assigned_networks', []).append(
                    {'id': group.id, 'name': group.name})

            allowed = network_manager.get_allowed_nic_networkgroups(
                node, iface)
            for group in allowed:
                entry.setdefault('allowed_networks', []).append(
                    {'id': group.id, 'name': group.name})

            result.append(entry)
        return result
Exemple #9
0
    def update(cls, cluster, network_configuration):
        """Apply a network-configuration dict to *cluster*.

        'net_manager' is set directly; for each network group the
        "ip_ranges" key replaces its IPAddrRange rows, all other keys are
        copied onto the row.
        """
        from nailgun.network.manager import NetworkManager
        network_manager = NetworkManager()
        if 'net_manager' in network_configuration:
            setattr(
                cluster,
                'net_manager',
                network_configuration['net_manager'])

        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = orm().query(NetworkGroup).get(ng['id'])

                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        # deleting old ip ranges
                        # (py2 map() is eager, so deletes execute here)
                        map(
                            orm().delete,
                            orm().query(IPAddrRange).filter_by(
                                network_group_id=ng['id']
                            )
                        )
                        for r in value:
                            # r is a (first, last) pair of addresses
                            new_ip_range = IPAddrRange(
                                first=r[0],
                                last=r[1],
                                network_group_id=ng['id']
                            )
                            orm().add(new_ip_range)
                            # NOTE(review): commit per range — a single
                            # commit would presumably suffice.
                            orm().commit()
                    else:
                        setattr(ng_db, key, value)

                network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')
Exemple #10
0
    def get_common_attrs(cls, cluster):
        """Common attributes for all facts (HA flavour)."""
        common_attrs = super(OrchestratorHASerializer, cls).get_common_attrs(
            cluster)

        net_manager = NetworkManager()
        for net_name in ('management', 'public'):
            common_attrs['%s_vip' % net_name] = net_manager.assign_vip(
                cluster.id, net_name)

        nodes_by_uid = sorted(
            common_attrs['nodes'], key=lambda item: item['uid'])
        controllers = cls.filter_by_roles(
            nodes_by_uid, ['controller', 'primary-controller'])
        common_attrs['last_controller'] = controllers[-1]['name']

        # Mark the primary controller in the nodes list in place.
        cls.set_primary_controller(common_attrs['nodes'])

        common_attrs['mp'] = [
            {'point': '1', 'weight': '1'},
            {'point': '2', 'weight': '2'}]

        return common_attrs
Exemple #11
0
    def get_common_attrs(cls, cluster):
        """Common attributes for all facts (HA flavour).

        Adds management/public VIPs, picks the last controller by uid,
        and promotes the first controller to 'primary-controller'.
        """
        common_attrs = super(OrchestratorHASerializer, cls).get_common_attrs(
            cluster)

        netmanager = NetworkManager()
        common_attrs['management_vip'] = netmanager.assign_vip(
            cluster.id, 'management')
        common_attrs['public_vip'] = netmanager.assign_vip(
            cluster.id, 'public')

        # Last controller by (string-ordered) uid.
        common_attrs['last_controller'] = sorted(
            common_attrs['controller_nodes'],
            key=lambda node: node['uid'])[-1]['name']

        # NOTE: mutates the shared node dict inside common_attrs['nodes']
        # in place; py2 filter() returns a list, so [0] is safe here only
        # when at least one controller exists.
        first_controller = filter(
            lambda node: 'controller' in node['role'],
            common_attrs['nodes'])[0]

        first_controller['role'] = 'primary-controller'

        common_attrs['mp'] = [
            {'point': '1', 'weight': '1'},
            {'point': '2', 'weight': '2'}]

        common_attrs['mountpoints'] = '1 1\\n2 2\\n'

        return common_attrs
    def serialize_for_cluster(cls, cluster):
        """Serialize the cluster's network configuration.

        Includes every network group plus the admin network, the DNS
        nameservers when set, and — in HA mode — a VIP per network that
        does not opt out via "assign_vip".
        """
        result = {'net_manager': cluster.net_manager}
        result['networks'] = [cls.serialize_network_group(group)
                              for group in cluster.network_groups]

        net_manager = NetworkManager()
        result['networks'].append(
            cls.serialize_network_group(
                net_manager.get_admin_network_group()))

        if cluster.dns_nameservers:
            result['dns_nameservers'] = {
                "nameservers": cluster.dns_nameservers}

        if cluster.is_ha_mode:
            nova_meta = cluster.release.networks_metadata["nova_network"]
            for network in nova_meta["networks"]:
                if network.get("assign_vip") is not False:
                    vip_key = '{0}_vip'.format(network["name"])
                    result[vip_key] = net_manager.assign_vip(
                        cluster.id, network["name"])
        return result
    def get_common_attrs(cls, cluster):
        """Common attributes for all facts (Nova HA flavour)."""
        common_attrs = super(
            NovaOrchestratorHASerializer,
            cls
        ).get_common_attrs(cluster)

        netmanager = NetworkManager()
        common_attrs['management_vip'] = netmanager.assign_vip(
            cluster.id, 'management')
        common_attrs['public_vip'] = netmanager.assign_vip(
            cluster.id, 'public')

        # Note: uids sorted numerically (int(...)) here, unlike the plain
        # string sort used by sibling serializers.
        sorted_nodes = sorted(
            common_attrs['nodes'], key=lambda node: int(node['uid']))

        controller_nodes = cls.filter_by_roles(
            sorted_nodes, ['controller', 'primary-controller'])
        common_attrs['last_controller'] = controller_nodes[-1]['name']

        # Assign primary controller in nodes list
        cls.set_primary_controller(common_attrs['nodes'])

        # Static point/weight pairs (consumed downstream).
        common_attrs['mp'] = [
            {'point': '1', 'weight': '1'},
            {'point': '2', 'weight': '2'}]

        return common_attrs
Exemple #14
0
 def render(cls, instance, fields=None):
     """Render *instance* as JSON, enriched with its network layout."""
     data = JSONHandler.render(instance, fields=cls.fields)
     data['network_data'] = NetworkManager().get_node_networks(instance.id)
     return data
Exemple #15
0
 def prepare_for_provisioning(cls, nodes):
     """Prepare environment for provisioning,
     update fqdns, assign admin IPs
     """
     cls.update_slave_nodes_fqdn(nodes)
     for node in nodes:
         # No interface count passed — assigns the default allocation.
         NetworkManager.assign_admin_ips(node.id)
Exemple #16
0
    def prepare_for_provisioning(self):
        """Assign admin-network IPs to every node queued for provisioning."""
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        manager = NetworkManager()
        for node in TaskHelper.nodes_to_provision(self):
            iface_count = len(node.meta.get('interfaces', []))
            manager.assign_admin_ips(node.id, iface_count)
Exemple #17
0
 def prepare_for_provisioning(cls, nodes):
     """Prepare environment for provisioning,
     update fqdns, assign admin ips
     """
     cls.update_slave_nodes_fqdn(nodes)
     for node in nodes:
         # One admin IP per interface reported in the node's metadata.
         NetworkManager.assign_admin_ips(
             node.id, len(node.meta.get('interfaces', [])))
Exemple #18
0
 def prepare_for_provisioning(cls, nodes):
     """Prepare environment for provisioning,
     update fqdns, assign admin ips
     """
     cls.update_slave_nodes_fqdn(nodes)
     for node in nodes:
         # One admin IP per interface reported in the node's metadata.
         NetworkManager.assign_admin_ips(
             node.id, len(node.meta.get('interfaces', [])))
Exemple #19
0
 def __format_node_for_naily(cls, n):
     """Serialize node *n* into the dict shape naily expects."""
     netmanager = NetworkManager()
     serialized = {
         'id': n.id,
         'uid': n.id,
         'status': n.status,
         'error_type': n.error_type,
         'ip': n.ip,
         'mac': n.mac,
         'role': n.role,
         'fqdn': n.fqdn,
         'progress': n.progress,
         'meta': n.meta,
         'network_data': netmanager.get_node_networks(n.id),
         'online': n.online
     }
     return serialized
Exemple #20
0
 def render(cls, instance, fields=None):
     """Render *instance* as JSON with its network data attached.

     Best-effort: any failure is logged and None is returned instead of
     propagating the exception.
     """
     json_data = None
     try:
         json_data = JSONHandler.render(instance, fields=cls.fields)
         network_manager = NetworkManager()
         json_data["network_data"] = network_manager.get_node_networks(instance.id)
     except Exception:
         logger.error(traceback.format_exc())
     return json_data
Exemple #21
0
 def __format_node_for_naily(cls, n):
     """Serialize node *n* into the dict consumed by naily.

     'uid' mirrors the database id.
     """
     netmanager = NetworkManager()
     return {
         'id': n.id, 'status': n.status, 'error_type': n.error_type,
         'uid': n.id, 'ip': n.ip, 'mac': n.mac, 'role': n.role,
         'fqdn': n.fqdn, 'progress': n.progress, 'meta': n.meta,
         'network_data': netmanager.get_node_networks(n.id),
         'online': n.online
     }
    def get_admin_ips(cls, node):
        """Collect the set of IPs assigned to *node* on the admin network."""
        admin_net_id = NetworkManager().get_admin_network_id()
        rows = db().query(IPAddr).filter_by(
            node=node.id).filter_by(network=admin_net_id)
        return set(row.ip_addr for row in rows)
Exemple #23
0
    def prepare_for_provisioning(self):
        """Refresh FQDNs and assign admin IPs for nodes to provision."""
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        manager = NetworkManager()
        pending = TaskHelper.nodes_to_provision(self)
        TaskHelper.update_slave_nodes_fqdn(pending)
        for node in pending:
            manager.assign_admin_ips(
                node.id, len(node.meta.get("interfaces", [])))
Exemple #24
0
    def get_admin_ips(cls, node):
        """Return the set of IPs assigned to *node* on the admin network."""
        netmanager = NetworkManager()
        admin_net_id = netmanager.get_admin_network_id()
        admin_ips = set([
            i.ip_addr for i in db().query(IPAddr).
            filter_by(node=node.id).
            filter_by(network=admin_net_id)])

        return admin_ips
Exemple #25
0
    def _success_action(cls, task, status, progress):
        """Finalize a successful deployment task.

        Composes a user-facing message — including the Horizon dashboard
        URL when it can be determined — then notifies the user and
        updates the task status.

        NOTE(review): if task.cluster.mode is neither
        singlenode/multinode nor 'ha', `message` is never bound and the
        notify call below would raise NameError — confirm that modes are
        restricted upstream.
        """
        network_manager = NetworkManager()
        # check if all nodes are ready
        if any(map(lambda n: n.status == 'error', task.cluster.nodes)):
            cls._error_action(task, 'error', 100)
            return

        if task.cluster.mode in ('singlenode', 'multinode'):
            # determining horizon url - it's an IP
            # of a first cluster controller
            controller = db().query(Node).filter_by(cluster_id=task.cluster_id,
                                                    role='controller').first()
            if controller:
                logger.debug(
                    u"Controller is found, node_id=%s, "
                    "getting it's IP addresses", controller.id)
                public_net = filter(
                    lambda n: n['name'] == 'public' and 'ip' in n,
                    network_manager.get_node_networks(controller.id))
                if public_net:
                    horizon_ip = public_net[0]['ip'].split('/')[0]
                    message = (
                        u"Deployment of environment '{0}' is done. "
                        "Access the OpenStack dashboard (Horizon) at "
                        "http://{1}/ or via internal network at http://{2}/"
                    ).format(task.cluster.name, horizon_ip, controller.ip)
                else:
                    message = (
                        u"Deployment of environment '{0}' is done").format(
                            task.cluster.name)
                    logger.warning(u"Public ip for controller node "
                                   "not found in '{0}'".format(
                                       task.cluster.name))
            else:
                message = (u"Deployment of environment"
                           " '{0}' is done").format(task.cluster.name)
                logger.warning(u"Controller node not found in '{0}'".format(
                    task.cluster.name))
        elif task.cluster.mode == 'ha':
            # determining horizon url in HA mode - it's vip
            # from a public network saved in task cache
            args = task.cache.get('args')
            try:
                vip = args['attributes']['public_vip']
                message = (
                    u"Deployment of environment '{0}' is done. "
                    "Access the OpenStack dashboard (Horizon) at http://{1}/"
                ).format(task.cluster.name, vip)
            except Exception as exc:
                logger.error(": ".join([str(exc), traceback.format_exc()]))
                message = (u"Deployment of environment"
                           " '{0}' is done").format(task.cluster.name)
                logger.warning(u"Cannot find virtual IP for '{0}'".format(
                    task.cluster.name))

        notifier.notify("done", message, task.cluster_id)
        TaskHelper.update_task_status(task.uuid, status, progress, message)
    def serialize_for_cluster(cls, cluster):
        """Serialize the cluster's network settings (plus VIPs in HA mode)."""
        serialized = {
            "net_manager": cluster.net_manager,
            "networks": map(cls.serialize_network_group,
                            cluster.network_groups),
        }

        if cluster.is_ha_mode:
            manager = NetworkManager()
            for net_name in ("management", "public"):
                serialized["%s_vip" % net_name] = manager.assign_vip(
                    cluster.id, net_name)
        return serialized
Exemple #27
0
 def render(cls, instance, fields=None):
     """Render *instance* via JSONHandler and attach its network data.

     Best-effort: failures are logged and None is returned.
     """
     json_data = None
     try:
         json_data = JSONHandler.render(instance, fields=cls.fields)
         network_manager = NetworkManager()
         json_data['network_data'] = network_manager.get_node_networks(
             instance.id)
     except Exception:
         logger.error(traceback.format_exc())
     return json_data
Exemple #28
0
    def prepare_for_provisioning(self):
        """Refresh FQDNs, then assign admin-network IPs to every node
        queued for provisioning.
        """
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper

        netmanager = NetworkManager()
        nodes = TaskHelper.nodes_to_provision(self)
        TaskHelper.update_slave_nodes_fqdn(nodes)
        for node in nodes:
            # One admin IP per interface reported in the node's metadata.
            netmanager.assign_admin_ips(node.id,
                                        len(node.meta.get('interfaces', [])))
Exemple #29
0
    def remove_cluster_resp(cls, **kwargs):
        """RPC callback for cluster removal.

        Delegates node removal to remove_nodes_resp; on success it drops
        the cluster's IP allocations and the cluster itself and notifies
        the user, on error it marks the cluster as errored.
        """
        network_manager = NetworkManager()
        logger.info(
            "RPC method remove_cluster_resp received: %s" %
            json.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')

        cls.remove_nodes_resp(**kwargs)

        task = db().query(Task).filter_by(uuid=task_uuid).first()
        cluster = task.cluster

        if task.status in ('ready',):
            logger.debug("Removing environment itself")
            cluster_name = cluster.name

            # All IPs allocated in any of the cluster's network groups.
            nws = itertools.chain(
                *[n.networks for n in cluster.network_groups]
            )
            ips = db().query(IPAddr).filter(
                IPAddr.network.in_([n.id for n in nws])
            )
            # py2 map() is eager, so the deletes happen here.
            map(db().delete, ips)
            db().commit()

            db().delete(cluster)
            db().commit()

            # Dmitry's hack for clearing VLANs without networks
            network_manager.clear_vlans()

            notifier.notify(
                "done",
                u"Environment '%s' and all its nodes are deleted" % (
                    cluster_name
                )
            )

        elif task.status in ('error',):
            cluster.status = 'error'
            db().add(cluster)
            db().commit()
            if not task.message:
                task.message = "Failed to delete nodes:\n{0}".format(
                    cls._generate_error_message(
                        task,
                        error_types=('deletion',)
                    )
                )
            notifier.notify(
                "error",
                task.message,
                cluster.id
            )
Exemple #30
0
    def PUT(self, cluster_id):
        """Update cluster attributes.

        The special "nodes" key replaces the cluster's node membership
        and re-wires the affected nodes' network assignments; any other
        key is set on the cluster directly.
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        data = self.checked_data()
        network_manager = NetworkManager()

        for key, value in data.iteritems():
            if key == "nodes":
                # TODO: separate nodes for deletion and addition by set().
                new_nodes = db().query(Node).filter(
                    Node.id.in_(value)
                )
                nodes_to_remove = [n for n in cluster.nodes
                                   if n not in new_nodes]
                nodes_to_add = [n for n in new_nodes
                                if n not in cluster.nodes]
                for node in nodes_to_add:
                    if not node.online:
                        raise web.badrequest(
                            "Can not add offline node to cluster")
                # py2 map() is eager: these perform the membership changes.
                map(cluster.nodes.remove, nodes_to_remove)
                map(cluster.nodes.append, nodes_to_add)
                for node in nodes_to_remove:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
                for node in nodes_to_add:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id
                    )
                    network_manager.assign_networks_to_main_interface(node.id)
            else:
                setattr(cluster, key, value)
        db().commit()
        return self.render(cluster)
Exemple #31
0
 def PUT(self):
     """Bulk-update node interface attributes; returns rendered nodes."""
     data = self.validator.validate_collection_structure(web.data())
     manager = NetworkManager()
     updated_ids = []
     for node_data in data:
         self.validator.verify_data_correctness(node_data)
         updated_ids.append(manager._update_attrs(node_data))
     nodes = db().query(Node).filter(Node.id.in_(updated_ids)).all()
     return map(self.render, nodes)
Exemple #32
0
    def PUT(self, node_id):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        interfaces = self.checked_data(
            self.validator.validate_structure_and_data, node_id=node_id)

        NetworkManager._update_attrs(
            {'id': node_id, 'interfaces': interfaces})
        node = self.get_object_or_404(Node, node_id)
        return [self.render(iface) for iface in node.interfaces]
Exemple #33
0
 def PUT(self):
     """Bulk-update node interfaces.

     Validates each node entry, applies the attribute update through
     NetworkManager, and returns the rendered updated nodes.
     """
     data = self.validator.validate_collection_structure(web.data())
     network_manager = NetworkManager()
     updated_nodes_ids = []
     for node_data in data:
         self.validator.verify_data_correctness(node_data)
         node_id = network_manager._update_attrs(node_data)
         updated_nodes_ids.append(node_id)
     updated_nodes = db().query(Node).filter(
         Node.id.in_(updated_nodes_ids)
     ).all()
     return map(self.render, updated_nodes)
Exemple #34
0
    def PUT(self, node_id):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        # Two-phase validation: raw JSON shape first, then the combined
        # node/interfaces payload.
        interfaces_data = self.validator.validate_json(web.data())
        node_data = {'id': node_id, 'interfaces': interfaces_data}
        self.validator.validate(node_data)

        NetworkManager._update_attrs(node_data)
        node = self.get_object_or_404(Node, node_id)
        return map(self.render, node.interfaces)
Exemple #35
0
    def serialize_for_cluster(cls, cluster):
        """Serialize network settings; in 'ha' mode also assign VIPs."""
        serialized = {
            'net_manager': cluster.net_manager,
            'networks': map(cls.serialize_network_group,
                            cluster.network_groups),
        }

        if cluster.mode == 'ha':
            manager = NetworkManager()
            serialized['management_vip'] = manager.assign_vip(
                cluster.id, 'management')
            serialized['public_vip'] = manager.assign_vip(
                cluster.id, 'public')
        return serialized
Exemple #36
0
    def PUT(self, node_id):
        """Update a single node.

        :returns: JSONized Node object.
        :http: * 200 (OK)
               * 400 (invalid node data specified)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager()

        old_cluster_id = node.cluster_id

        # An explicit empty pending_roles list drops pending changes.
        if data.get("pending_roles") == [] and node.cluster:
            node.cluster.clear_pending_changes(node_id=node.id)

        if "cluster_id" in data:
            if data["cluster_id"] is None and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)
                node.roles = node.pending_roles = []
            node.cluster_id = data["cluster_id"]
            if node.cluster_id != old_cluster_id:
                # Re-wire network assignments when the node joins, leaves
                # or moves between clusters.
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if node.cluster_id:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(node)

        # Role or cluster-membership changes invalidate the volume layout.
        regenerate_volumes = any(
            (
                "roles" in data and set(data["roles"]) != set(node.roles),
                "pending_roles" in data and set(data["pending_roles"]) != set(node.pending_roles),
                node.cluster_id != old_cluster_id,
            )
        )

        for key, value in data.iteritems():
            # we don't allow to update id explicitly
            # and cluster_id was already applied above
            if key in ("id", "cluster_id"):
                continue
            setattr(node, key, value)

        if node.status not in ("provisioning", "deploying") and regenerate_volumes:
            try:
                node.attributes.volumes = node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (u"Failed to generate volumes " "info for node '{0}': '{1}'").format(
                    node.name or data.get("mac") or data.get("id"), str(exc) or "see logs for details"
                )
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
Exemple #37
0
    def get_admin_ip(cls, node):
        """Return the node's admin IP in CIDR form, carrying the admin
        network's prefix length.
        """
        manager = NetworkManager()
        iface_name = node.admin_interface.name
        ip = IPNetwork(
            manager.get_admin_ips_for_interfaces(node)[iface_name])

        # Borrow the prefix length from the admin network itself.
        admin_cidr = IPNetwork(manager.get_admin_network().cidr)
        ip.prefixlen = admin_cidr.prefixlen

        return str(ip)
Exemple #38
0
    def get_controller_nodes_ips(self, cluster):
        """Collect admin-network IP addresses of the cluster's
        controller nodes.
        """
        manager = NetworkManager()
        admin_net_id = manager.get_admin_network_id()
        addresses = []
        for ctrl in self.controllers(cluster):
            record = db().query(IPAddr).filter_by(
                node=ctrl.id, network=admin_net_id).first()
            addresses.append(record.ip_addr)
        return addresses
    def get_admin_ip(cls, node):
        """Compute the admin IP/prefix string for ``node``."""
        nm = NetworkManager()
        raw_ip = nm.get_admin_ips_for_interfaces(node)[
            node.admin_interface.name]
        addr = IPNetwork(raw_ip)

        # Use the same prefix length as the admin network CIDR.
        addr.prefixlen = IPNetwork(nm.get_admin_network().cidr).prefixlen

        return str(addr)
Exemple #40
0
    def _success_action(cls, task, status, progress):
        """Finalize a successfully finished deployment task.

        Composes a user-facing message (including the Horizon dashboard
        URL when it can be determined), sends a "done" notification and
        updates the task status.

        :param task: the deployment Task being finalized
        :param status: status string to store on the task
        :param progress: progress value to store on the task
        """
        network_manager = NetworkManager()
        # check if all nodes are ready
        if any(map(lambda n: n.status == "error", task.cluster.nodes)):
            cls._error_action(task, "error", 100)
            return

        # Default message. Previously ``message`` was only assigned inside
        # the "singlenode"/"multinode" and "ha" branches, so any other
        # cluster mode (e.g. "ha_compact") raised NameError at the
        # notifier.notify() call below.
        message = (u"Deployment of environment"
                   " '{0}' is done").format(task.cluster.name)

        if task.cluster.mode in ("singlenode", "multinode"):
            # determining horizon url - it's an IP
            # of a first cluster controller
            controller = (
                db()
                .query(Node)
                .filter_by(cluster_id=task.cluster_id)
                .filter(Node.role_list.any(name="controller"))
                .first()
            )
            if controller:
                logger.debug(
                    u"Controller is found, node_id=%s, "
                    "getting it's IP addresses", controller.id)
                public_net = filter(
                    lambda n: n["name"] == "public" and "ip" in n,
                    network_manager.get_node_networks(controller.id)
                )
                if public_net:
                    horizon_ip = public_net[0]["ip"].split("/")[0]
                    message = (
                        u"Deployment of environment '{0}' is done. "
                        "Access the OpenStack dashboard (Horizon) at "
                        "http://{1}/ or via internal network at http://{2}/"
                    ).format(task.cluster.name, horizon_ip, controller.ip)
                else:
                    # Keep the default message; just record the anomaly.
                    logger.warning(u"Public ip for controller node "
                                   "not found in '{0}'".format(
                                       task.cluster.name))
            else:
                logger.warning(u"Controller node not found in '{0}'".format(
                    task.cluster.name))
        elif task.cluster.mode == "ha":
            # determining horizon url in HA mode - it's vip
            # from a public network saved in task cache
            args = task.cache.get("args")
            try:
                vip = args["attributes"]["public_vip"]
                message = (
                    u"Deployment of environment '{0}' is done. "
                    "Access the OpenStack dashboard (Horizon) at http://{1}/"
                ).format(task.cluster.name, vip)
            except Exception as exc:
                logger.error(": ".join([str(exc), traceback.format_exc()]))
                logger.warning(u"Cannot find virtual IP for '{0}'".format(
                    task.cluster.name))

        notifier.notify("done", message, task.cluster_id)
        TaskHelper.update_task_status(task.uuid, status, progress, message)
    def add_vlan_interfaces(cls, nodes):
        """Set ``fixed_interface`` and ``vlan_interface`` on each node
        dict; by design both must name the same interface.
        """
        manager = NetworkManager()
        for node_info in nodes:
            db_node = db().query(Node).get(node_info['uid'])

            iface = manager._get_interface_by_network_name(
                db_node.id, 'fixed')

            node_info['fixed_interface'] = iface.name
            node_info['vlan_interface'] = iface.name
Exemple #42
0
    def add_vlan_interfaces(cls, nodes):
        """Assign identical ``fixed_interface`` / ``vlan_interface``
        names to every node dict in ``nodes``.
        """
        nm = NetworkManager()
        for entry in nodes:
            node_record = db().query(Node).get(entry['uid'])

            fixed_nic = nm._get_interface_by_network_name(
                node_record.id, 'fixed')

            # Both keys must point at the same interface.
            entry['fixed_interface'] = fixed_nic.name
            entry['vlan_interface'] = fixed_nic.name
Exemple #43
0
    def PUT(self, node_id):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        interfaces = self.validator.validate_json(web.data())
        payload = {'id': node_id, 'interfaces': interfaces}
        self.validator.validate(payload)

        # Persist the interface attributes, then re-read the node.
        NetworkManager()._update_attrs(payload)
        node = self.get_object_or_404(Node, node_id)
        return self.render(node)['interfaces']
Exemple #44
0
 def __init__(self, app):
     """Wire up a test-environment wrapper around ``app``."""
     self.db = db()
     self.app = app
     # A throwaway TestCase instance gives access to assert* helpers;
     # runTest is stubbed so instantiation doesn't require a real test.
     self.tester = TestCase
     self.tester.runTest = lambda a: None
     self.tester = self.tester()
     self.here = os.path.abspath(os.path.dirname(__file__))
     self.fixture_dir = os.path.join(self.here, "..", "fixtures")
     self.default_headers = {"Content-Type": "application/json"}
     # Registries of objects created during a test run.
     for registry in ("releases", "clusters", "nodes"):
         setattr(self, registry, [])
     self.network_manager = NetworkManager()
Exemple #45
0
 def serialize_for_cluster(cls, cluster):
     """Build the serialized network configuration for ``cluster``;
     in HA mode also assign and include a VIP per eligible network.
     """
     data = {
         'net_manager': cluster.net_manager,
         'networks': map(cls.serialize_network_group,
                         cluster.network_groups),
     }
     if cluster.is_ha_mode:
         manager = NetworkManager()
         for net in cluster.release.networks_metadata:
             # Networks may opt out with assign_vip == False.
             if net.get("assign_vip") is not False:
                 key = '{0}_vip'.format(net["name"])
                 data[key] = manager.assign_vip(cluster.id, net["name"])
     return data
Exemple #46
0
 def PUT(self):
     """:returns: Collection of JSONized Node objects.
     :http: * 200 (nodes are successfully updated)
            * 400 (invalid nodes data specified)
     """
     payload = self.validator.validate_collection_structure(web.data())
     manager = NetworkManager()
     changed_ids = []
     for entry in payload:
         # Each entry is validated individually before being applied.
         self.validator.verify_data_correctness(entry)
         changed_ids.append(manager._update_attrs(entry))
     refreshed = db().query(Node).filter(
         Node.id.in_(changed_ids)).all()
     return map(self.render, refreshed)
Exemple #47
0
    def POST(self):
        """:returns: JSONized Cluster object.
        :http: * 201 (cluster successfully created)
               * 400 (invalid cluster data specified)
               * 409 (cluster with such parameters already exists)
        """
        # It's used for cluster creating only.
        data = self.checked_data()

        cluster = Cluster()
        cluster.release = db().query(Release).get(data["release"])
        # TODO(NAME): use fields
        # Only copy the optional fields the caller actually supplied.
        for field in ('name', 'mode', 'net_manager'):
            if data.get(field):
                setattr(cluster, field, data.get(field))
        db().add(cluster)
        # Commit early so the cluster gets an id, which is required by
        # create_network_groups() below.
        db().commit()
        # Seed editable/generated attributes from the release metadata.
        attributes = Attributes(
            editable=cluster.release.attributes_metadata.get("editable"),
            generated=cluster.release.attributes_metadata.get("generated"),
            cluster=cluster
        )
        attributes.generate_fields()

        netmanager = NetworkManager()
        try:
            netmanager.create_network_groups(cluster.id)

            cluster.add_pending_changes("attributes")
            cluster.add_pending_changes("networks")

            # Optionally attach pre-existing nodes and give them default
            # network assignments.
            if 'nodes' in data and data['nodes']:
                nodes = db().query(Node).filter(
                    Node.id.in_(data['nodes'])
                ).all()
                map(cluster.nodes.append, nodes)
                db().commit()
                for node in nodes:
                    netmanager.allow_network_assignment_to_all_interfaces(
                        node.id
                    )
                    netmanager.assign_networks_to_main_interface(node.id)

            # web.py signals success by raising the response.
            raise web.webapi.created(json.dumps(
                ClusterHandler.render(cluster),
                indent=4
            ))
        except (
            errors.OutOfVLANs,
            errors.OutOfIPs,
            errors.NoSuitableCIDR,
            errors.InvalidNetworkPool
        ) as e:
            # Cluster was created in this request,
            # so we no need to use ClusterDeletionManager.
            # All relations wiil be cascade deleted automaticly.
            # TODO(NAME): investigate transactions
            db().delete(cluster)

            raise web.badrequest(e.message)
Exemple #48
0
    def add_vlan_interfaces(cls, nodes):
        """With VlanManager the fixed network itself is not passed to the
        orchestrator; instead each node dict carries ``vlan_interface``
        (the fixed/private interface name).
        """
        manager = NetworkManager()
        for entry in nodes:
            db_node = db().query(Node).get(entry['uid'])

            fixed_nic = manager._get_interface_by_network_name(
                db_node.id, 'fixed')

            entry['vlan_interface'] = fixed_nic.name
Exemple #49
0
 def PUT(self):
     """:returns: Collection of JSONized Node objects.
     :http: * 200 (nodes are successfully updated)
            * 400 (invalid nodes data specified)
     """
     data = self.validator.validate_collection_structure(web.data())
     nm = NetworkManager()
     node_ids = []
     for item in data:
         self.validator.verify_data_correctness(item)
         # _update_attrs returns the id of the node it touched.
         node_ids.append(nm._update_attrs(item))
     nodes = db().query(Node).filter(Node.id.in_(node_ids)).all()
     return map(self.render, nodes)
class TestNetworkConfigurationHandlerHAMode(BaseIntegrationTest):
    """Network configuration handler behavior for an HA cluster."""

    def setUp(self):
        super(TestNetworkConfigurationHandlerHAMode, self).setUp()
        cluster = self.env.create_cluster(api=True, mode="ha_compact")
        self.cluster = self.db.query(Cluster).get(cluster["id"])
        self.net_manager = NetworkManager()

    def test_returns_management_vip_and_public_vip(self):
        """The handler response must contain the VIPs assigned by
        NetworkManager for the management and public networks."""
        url = reverse("NovaNetworkConfigurationHandler",
                      kwargs={"cluster_id": self.cluster.id})

        resp = json.loads(
            self.app.get(url, headers=self.default_headers).body)

        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(
            resp["management_vip"],
            self.net_manager.assign_vip(self.cluster.id, "management"))

        self.assertEqual(
            resp["public_vip"],
            self.net_manager.assign_vip(self.cluster.id, "public"))
Exemple #51
0
    def __add_vlan_interfaces(cls, nodes):
        """With VlanManager the fixed network is not handed to the
        orchestrator; instead the fixed (private) interface name is set
        on each node dict as ``vlan_interface``.
        """
        manager = NetworkManager()
        for entry in nodes:
            # NOTE: this variant keys nodes by 'id' and passes the node
            # object (not its id) to the interface lookup.
            db_node = orm().query(Node).get(entry['id'])

            fixed_nic = manager._get_interface_by_network_name(
                db_node, 'fixed')

            entry['vlan_interface'] = fixed_nic.name
    def serialize_for_cluster(cls, cluster):
        """Serialize network settings; in 'ha' mode also assign and
        include the management and public VIPs.
        """
        data = {
            'net_manager': cluster.net_manager,
            'networks': map(cls.serialize_network_group,
                            cluster.network_groups),
        }

        if cluster.mode == 'ha':
            manager = NetworkManager()
            for net_name in ('management', 'public'):
                data['%s_vip' % net_name] = manager.assign_vip(
                    cluster.id, net_name)
        return data
    def setUp(self):
        """Create a neutron/GRE cluster (2 controllers, 2 computes,
        one compute offline) and cache fixtures used by the tests."""
        super(TestRepoAvailability, self).setUp()
        self.env.create(cluster_kwargs={
            'net_provider': 'neutron',
            'net_segment_type': 'gre'
        },
                        nodes_kwargs=[{
                            'roles': ['controller']
                        }, {
                            'roles': ['controller']
                        }, {
                            'roles': ['compute']
                        }, {
                            'roles': ['compute'],
                            'online': False
                        }])

        self.cluster = self.env.clusters[0]
        # The public network group of the created cluster.
        self.public_ng = next(ng for ng in self.cluster.network_groups
                              if ng.name == 'public')
        # Two spare IPs from the public range, for repo-check probing.
        self.free_ips = NetworkManager.get_free_ips(self.public_ng, 2)
        self.repo_urls = objects.Cluster.get_repo_urls(self.cluster)
        self.controllers = [
            n for n in self.cluster.nodes if 'controller' in n.all_roles
        ]
        # uids of nodes reported online (excludes the offline compute).
        self.online_uids = [n.uid for n in self.cluster.nodes if n.online]
Exemple #54
0
    def get_common_attrs(cls, cluster):
        """Common attributes for all facts in HA mode: base attributes
        plus per-network VIPs, management points and controller info.
        """
        common_attrs = super(DeploymentHASerializer,
                             cls).get_common_attrs(cluster)

        # One VIP per network group that requests it.
        for group in cluster.network_groups:
            if group.meta.get("assign_vip"):
                vip_key = group.name + '_vip'
                common_attrs[vip_key] = NetworkManager.assign_vip(
                    cluster.id, group.name)

        common_attrs['mp'] = [
            {'point': '1', 'weight': '1'},
            {'point': '2', 'weight': '2'},
        ]

        # Determine the last controller by numeric uid order.
        by_uid = sorted(common_attrs['nodes'],
                        key=lambda item: int(item['uid']))
        controllers = cls.filter_by_roles(
            by_uid, ['controller', 'primary-controller'])
        common_attrs['last_controller'] = controllers[-1]['name']

        # Assign primary controller in nodes list
        cls.set_primary_controller(common_attrs['nodes'])

        return common_attrs
Exemple #55
0
    def get_default(self, node):
        """Return the default network assignment for ``node``, picking
        the manager matching the cluster's network provider."""
        use_neutron = (node.cluster and
                       node.cluster.net_provider == 'neutron')
        manager = NeutronManager() if use_neutron else NetworkManager()
        return manager.get_default_networks_assignment(node)
Exemple #56
0
 def POST(self, cluster_id):
     """:returns: Empty string
     :http: * 204 (node successfully unassigned)
            * 404 (node not found in db)
     """
     nodes = self.checked_data(self.validator.validate_collection_update)
     for node in nodes:
         if node.status != "discover":
             # Deployed nodes are only scheduled for deletion.
             node.pending_deletion = True
         else:
             # Discovered nodes are detached from the cluster entirely.
             node.cluster.clear_pending_changes(node_id=node.id)
             node.pending_roles = []
             node.cluster_id = None
             node.pending_addition = False
             NetworkManager.clear_assigned_networks(node)
         db().commit()
     raise web.ok
Exemple #57
0
    def admin_interface(self):
        """Return the interface whose IP lies inside the admin subnet,
        falling back to the first interface when none matches.

        :raises: errors.CanNotFindInterface
        """
        # Local import avoids a circular dependency at module load time.
        from nailgun.network.manager import NetworkManager
        manager = NetworkManager()

        for nic in self.interfaces:
            if manager.is_ip_belongs_to_admin_subnet(nic.ip_addr):
                return nic

        logger.warning(u'Cannot find admin interface for node '
                       'return first interface: "%s"' % self.full_name)
        return self.interfaces[0]
Exemple #58
0
    def render(cls, nodes, fields=None):
        """Serialize ``nodes`` to JSON dicts, attaching network data
        resolved via pre-grouped IP and network lookups (one query each
        instead of per-node queries)."""
        manager = NetworkManager()
        ips_by_node = manager.get_grouped_ips_by_node()
        nets_by_cluster = manager.get_networks_grouped_by_cluster()

        serialized = []
        for node in nodes:
            try:
                item = JSONHandler.render(node, fields=cls.fields)
                item['network_data'] = manager.get_node_networks_optimized(
                    node,
                    ips_by_node.get(node.id, []),
                    nets_by_cluster.get(node.cluster_id, []))
                serialized.append(item)
            except Exception:
                # A node that fails to serialize is skipped, not fatal.
                logger.error(traceback.format_exc())
        return serialized