Example #1
    def POST(self, cluster_id):
        cluster = self.get_object_or_404(objects.Cluster, cluster_id)
        data = self.checked_data()
        node_id = data["node_id"]
        node = self.get_object_or_404(objects.Node, node_id)

        netgroups_mapping = self.get_netgroups_map(node.cluster, cluster)

        orig_roles = node.roles

        objects.Node.update_roles(node, [])  # flush
        objects.Node.update_pending_roles(node, [])  # flush

        node.replaced_deployment_info = []
        node.deployment_info = []
        node.kernel_params = None
        node.cluster_id = cluster.id
        node.group_id = None

        objects.Node.assign_group(node)  # flush
        objects.Node.update_pending_roles(node, orig_roles)  # flush

        for ip in node.ip_addrs:
            ip.network = netgroups_mapping[ip.network]

        nic_assignments = db.query(models.NetworkNICAssignment).\
            join(models.NodeNICInterface).\
            filter(models.NodeNICInterface.node_id == node.id).\
            all()
        for nic_assignment in nic_assignments:
            nic_assignment.network_id = \
                netgroups_mapping[nic_assignment.network_id]

        bond_assignments = db.query(models.NetworkBondAssignment).\
            join(models.NodeBondInterface).\
            filter(models.NodeBondInterface.node_id == node.id).\
            all()
        for bond_assignment in bond_assignments:
            bond_assignment.network_id = \
                netgroups_mapping[bond_assignment.network_id]

        objects.Node.add_pending_change(node,
                                        consts.CLUSTER_CHANGES.interfaces)

        node.pending_addition = True
        node.pending_deletion = False

        task = models.Task(name=consts.TASK_NAMES.node_deletion,
                           cluster=cluster)

        db.commit()

        self.delete_node_by_astute(task, node)
Example #2
    def POST(self, cluster_id):
        cluster = self.get_object_or_404(objects.Cluster, cluster_id)
        data = self.checked_data()
        node_id = data["node_id"]
        node = self.get_object_or_404(objects.Node, node_id)

        netgroups_mapping = self.get_netgroups_map(node.cluster, cluster)

        orig_roles = node.roles

        objects.Node.update_roles(node, [])  # flush
        objects.Node.update_pending_roles(node, [])  # flush

        node.replaced_deployment_info = []
        node.deployment_info = []
        node.kernel_params = None
        node.cluster_id = cluster.id
        node.group_id = None

        objects.Node.assign_group(node)  # flush
        objects.Node.update_pending_roles(node, orig_roles)  # flush

        for ip in node.ip_addrs:
            ip.network = netgroups_mapping[ip.network]

        nic_assignments = db.query(models.NetworkNICAssignment).\
            join(models.NodeNICInterface).\
            filter(models.NodeNICInterface.node_id == node.id).\
            all()
        for nic_assignment in nic_assignments:
            nic_assignment.network_id = \
                netgroups_mapping[nic_assignment.network_id]

        bond_assignments = db.query(models.NetworkBondAssignment).\
            join(models.NodeBondInterface).\
            filter(models.NodeBondInterface.node_id == node.id).\
            all()
        for bond_assignment in bond_assignments:
            bond_assignment.network_id = \
                netgroups_mapping[bond_assignment.network_id]

        objects.Node.add_pending_change(node,
                                        consts.CLUSTER_CHANGES.interfaces)

        node.pending_addition = True
        node.pending_deletion = False

        task = models.Task(name=consts.TASK_NAMES.node_deletion,
                           cluster=cluster)

        db.commit()

        self.delete_node_by_astute(task, node)
Example #3
 def validate_collection_update(cls, data, cluster_id=None):
     data = cls.validate_json(data)
     cls.validate_schema(data, assignment_format_schema)
     dict_data = dict((d["id"], d["roles"]) for d in data)
     received_node_ids = dict_data.keys()
     nodes = db.query(Node).filter(Node.id.in_(received_node_ids))
     cls.check_all_nodes(nodes, received_node_ids)
     cls.check_if_already_done(nodes)
     release = db.query(Cluster).get(cluster_id).release
     for node_id in received_node_ids:
         cls.validate_roles(release, dict_data[node_id])
     return dict_data
Example #4
    def test_fails_if_there_is_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name, cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.assertRaisesWithMessage(
                errors.DeploymentAlreadyStarted,
                'Cannot perform the actions because there are '
                'running tasks {0}'.format([task]),
                DeploymentCheckMixin.check_no_running_deployment, self.cluster)

            db.query(models.Task).delete()
Example #5
    def test_fails_if_there_is_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name, cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.assertRaisesWithMessage(
                errors.DeploymentAlreadyStarted,
                'Cannot perform the actions because there are '
                'running tasks {0}'.format([task]),
                DeploymentCheckMixin.check_no_running_deployment,
                self.cluster)

            db.query(models.Task).delete()
Example #6
 def validate_collection_update(cls, data, cluster_id=None):
     data = cls.validate_json(data)
     cls.validate_schema(data, assignment_format_schema)
     dict_data = dict((d["id"], d["roles"]) for d in data)
     received_node_ids = dict_data.keys()
     nodes = db.query(Node).filter(Node.id.in_(received_node_ids))
     cls.check_all_nodes(nodes, received_node_ids)
     cls.check_if_already_done(nodes)
     release = db.query(Cluster).get(cluster_id).release
     for node_id in received_node_ids:
         cls.validate_roles(
             release,
             dict_data[node_id]
         )
     return dict_data
Example #7
 def change_cluster_list(env_id, cluster_list):
     cluster_db = db.query(Cluster).get(env_id)
     cluster_db.vmware_attributes.editable = deepcopy(
         cluster_db.vmware_attributes.editable)
     cluster_db.vmware_attributes.editable['value']['availability_zones'][
         0]['nova_computes'][0]['vsphere_cluster'] = cluster_list
     db.commit()
Example #8
 def validate_collection_update(cls, data):
     list_data = cls.validate_json(data)
     cls.validate_schema(list_data, unassignment_format_schema)
     node_ids = [n['id'] for n in list_data]
     nodes = db.query(Node).filter(Node.id.in_(node_ids))
     cls.check_all_nodes(nodes, node_ids)
     cls.check_if_already_done(nodes)
     return nodes
Example #9
 def validate_collection_update(cls, data):
     list_data = cls.validate_json(data)
     cls.validate_schema(list_data, unassignment_format_schema)
     node_ids = [n['id'] for n in list_data]
     nodes = db.query(Node).filter(Node.id.in_(node_ids))
     cls.check_all_nodes(nodes, node_ids)
     cls.check_if_already_done(nodes)
     return nodes
Example #10
    def filter_by_graph_types(cls, graph_types):
        assocs = []
        for _, assoc_model in cls.single.associations:
            assocs.extend(assoc.deployment_graph
                          for assoc in db.query(assoc_model).filter(
                              assoc_model.type.in_(graph_types)).all())

        return assocs
Example #11
    def check_no_running_deployment(cls, cluster):
        tasks_q = objects.TaskCollection.get_by_name_and_cluster(
            cluster, cls.deployment_tasks).filter_by(
                status=consts.TASK_STATUSES.running)

        tasks_exists = db.query(tasks_q.exists()).scalar()
        if tasks_exists:
            raise errors.DeploymentAlreadyStarted(
                'Cannot perform the actions because there are '
                'running tasks {0}'.format(tasks_q.all()))
Example #12
    def check_no_running_deployment(cls, cluster):
        tasks_q = objects.TaskCollection.get_by_name_and_cluster(
            cluster, cls.deployment_tasks).filter_by(
                status=consts.TASK_STATUSES.running)

        tasks_exists = db.query(tasks_q.exists()).scalar()
        if tasks_exists:
            raise errors.DeploymentAlreadyStarted(
                'Cannot perform the actions because there are '
                'running tasks {0}'.format(tasks_q.all()))
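Examples #11 and #12 build a filtered query and then check for matching rows with db.query(tasks_q.exists()).scalar(), which issues a single SELECT EXISTS(...) instead of fetching the rows. Below is a minimal, self-contained sketch of that pattern using plain SQLAlchemy (1.4+) with an illustrative Task model and an in-memory SQLite session; it is not the project's actual models or session factory.

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()

    class Task(Base):
        __tablename__ = 'tasks'
        id = Column(Integer, primary_key=True)
        name = Column(String)
        status = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    db = sessionmaker(bind=engine)()

    db.add(Task(name='deployment', status='running'))
    db.commit()

    # Build the filtered query first, then wrap it in EXISTS; the database
    # answers with a single boolean instead of returning the matching rows.
    tasks_q = db.query(Task).filter_by(status='running')
    tasks_exist = db.query(tasks_q.exists()).scalar()
    print(tasks_exist)  # True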
Example #13
    def _validate_nodes(cls, new_node_ids, instance):
        set_new_node_ids = set(new_node_ids)
        set_old_node_ids = set(objects.Cluster.get_nodes_ids(instance))
        nodes_to_add = set_new_node_ids - set_old_node_ids
        nodes_to_remove = set_old_node_ids - set_new_node_ids

        hostnames_to_add = [
            x[0] for x in db.query(Node.hostname).filter(
                Node.id.in_(nodes_to_add)).all()
        ]

        duplicated = [
            x[0] for x in db.query(Node.hostname).filter(
                sa.and_(Node.hostname.in_(hostnames_to_add), Node.cluster_id ==
                        instance.id, Node.id.notin_(nodes_to_remove))).all()
        ]
        if duplicated:
            raise errors.AlreadyExists(
                "Nodes with hostnames [{0}] already exist in cluster {1}.".
                format(",".join(duplicated), instance.id))
Example #14
 def validate_collection_update(cls, data, cluster_id=None):
     list_data = cls.validate_json(data)
     cls.validate_schema(list_data, unassignment_format_schema)
     node_ids_set = set(n['id'] for n in list_data)
     nodes = db.query(Node).filter(Node.id.in_(node_ids_set))
     node_id_cluster_map = dict((n.id, n.cluster_id) for n in db.query(
         Node.id, Node.cluster_id).filter(Node.id.in_(node_ids_set)))
     other_cluster_ids_set = set(node_id_cluster_map.values()) - \
         set((int(cluster_id),))
     if other_cluster_ids_set:
         raise errors.InvalidData(
             u"Nodes [{0}] are not members of environment {1}.".format(
                 u", ".join(
                     str(n_id)
                     for n_id, c_id in node_id_cluster_map.iteritems()
                     if c_id in other_cluster_ids_set), cluster_id),
             log_message=True)
     cls.check_all_nodes(nodes, node_ids_set)
     cls.check_if_already_done(nodes)
     return nodes
Example #15
    def get_for_model(cls, instance):
        """Get deployment graphs related to given model.

        :param instance: model that could have relation to graph
        :type instance: models.Plugin|models.Cluster|models.Release|
        :return: graph instance
        :rtype: model.DeploymentGraph
        """
        association_model = cls.single.get_association_for_model(instance)
        graphs = db.query(models.DeploymentGraph).join(association_model).join(
            instance.__class__).filter(instance.__class__.id == instance.id)
        return graphs.all()
Example #16
    def _validate_nodes(cls, new_node_ids, instance):
        set_new_node_ids = set(new_node_ids)
        set_old_node_ids = set(objects.Cluster.get_nodes_ids(instance))
        nodes_to_add = set_new_node_ids - set_old_node_ids
        nodes_to_remove = set_old_node_ids - set_new_node_ids

        hostnames_to_add = [x[0] for x in db.query(Node.hostname)
                            .filter(Node.id.in_(nodes_to_add)).all()]

        duplicated = [x[0] for x in db.query(Node.hostname).filter(
            sa.and_(
                Node.hostname.in_(hostnames_to_add),
                Node.cluster_id == instance.id,
                Node.id.notin_(nodes_to_remove)
            )
        ).all()]
        if duplicated:
            raise errors.AlreadyExists(
                "Nodes with hostnames [{0}] already exist in cluster {1}."
                .format(",".join(duplicated), instance.id)
            )
Example #17
    def test_does_not_fail_if_there_is_deleted_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name,
                               deleted_at=datetime.datetime.now(),
                               cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.addCleanup(db.query(models.Task).delete)

            self.assertNotRaises(
                errors.DeploymentAlreadyStarted,
                DeploymentCheckMixin.check_no_running_deployment, self.cluster)
Example #18
    def test_does_not_fail_if_there_is_deleted_task(self):
        for task_name in DeploymentCheckMixin.deployment_tasks:
            task = models.Task(name=task_name,
                               deleted_at=datetime.datetime.now(),
                               cluster_id=self.cluster.id)
            db.add(task)
            db.flush()
            self.addCleanup(db.query(models.Task).delete)

            self.assertNotRaises(
                errors.DeploymentAlreadyStarted,
                DeploymentCheckMixin.check_no_running_deployment,
                self.cluster)
Example #19
    def get_assigned_vips_for_controller_group(cls, cluster):
        """Get VIPs assigned in specified cluster's controller node group

        :param cluster: Cluster object
        :type cluster: Cluster model
        :returns: VIPs for given cluster
        """
        node_group_id = Cluster.get_controllers_group_id(cluster)
        cluster_vips = db.query(models.IPAddr).join(
            models.IPAddr.network_data).filter(
                models.IPAddr.vip_name.isnot(None)
                & (models.NetworkGroup.group_id == node_group_id))
        return cluster_vips
Example #20
    def get_assigned_vips_for_controller_group(cls, cluster):
        """Get VIPs assigned in specified cluster's controller node group

        :param cluster: Cluster object
        :type cluster: Cluster model
        :returns: VIPs for given cluster
        """
        node_group_id = Cluster.get_controllers_group_id(cluster)
        cluster_vips = db.query(models.IPAddr).join(
            models.IPAddr.network_data).filter(
                models.IPAddr.vip_name.isnot(None) &
                (models.NetworkGroup.group_id == node_group_id))
        return cluster_vips
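Examples #19 and #20 combine join() on a relationship with a boolean filter built from isnot(None) and &. A standalone sketch of that combination follows; the IPAddr and NetworkGroup classes here are assumed stand-ins for illustration, not the real Nailgun models.

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.orm import declarative_base, relationship, sessionmaker

    Base = declarative_base()

    class NetworkGroup(Base):
        __tablename__ = 'network_groups'
        id = Column(Integer, primary_key=True)
        group_id = Column(Integer)

    class IPAddr(Base):
        __tablename__ = 'ip_addrs'
        id = Column(Integer, primary_key=True)
        vip_name = Column(String, nullable=True)
        network = Column(Integer, ForeignKey('network_groups.id'))
        network_data = relationship(NetworkGroup)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    db = sessionmaker(bind=engine)()

    # join() follows the relationship to NetworkGroup; isnot(None) keeps only
    # rows that carry a VIP name, and & combines both conditions into one
    # SQL AND (the == comparison needs its own parentheses).
    vips = db.query(IPAddr).join(IPAddr.network_data).filter(
        IPAddr.vip_name.isnot(None) & (NetworkGroup.group_id == 1))
    print(vips.all())  # [] until matching rows are inserted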
Example #21
 def check_unique_hostnames(cls, nodes, cluster_id):
     hostnames = [node.hostname for node in nodes]
     conflicting_hostnames = [
         x[0] for x in db.query(Node.hostname).filter(
             sa.and_(
                 Node.hostname.in_(hostnames),
                 Node.cluster_id == cluster_id,
             )).all()
     ]
     if conflicting_hostnames:
         raise errors.AlreadyExists(
             "Nodes with hostnames [{0}] already exist in cluster {1}.".
             format(",".join(conflicting_hostnames), cluster_id))
Example #22
    def validate_collection_update(cls, data, cluster_id=None):
        data = cls.validate_json(data)
        cls.validate_schema(data, assignment_format_schema)
        dict_data = dict((d["id"], d["roles"]) for d in data)
        received_node_ids = dict_data.keys()
        nodes = db.query(Node).filter(Node.id.in_(received_node_ids))
        cls.check_all_nodes(nodes, received_node_ids)
        cls.check_if_already_done(nodes)
        cluster = objects.Cluster.get_by_uid(cluster_id,
                                             fail_if_not_found=True)

        for node_id in received_node_ids:
            cls.validate_roles(cluster, dict_data[node_id])
        return dict_data
Example #23
 def _get_admin_node_network(cls, node_id):
     net = cls.get_admin_network_group()
     net_cidr = IPNetwork(net.cidr)
     node = db.query(Node).get(node_id)
     ip_addr = cls.get_admin_ip_for_node(node)
     return {
         'name': net.name,
         'vlan': net.vlan_start,
         'ip': "{0}/{1}".format(ip_addr, net_cidr.prefixlen),
         'netmask': str(net_cidr.netmask),
         'brd': str(net_cidr.broadcast),
         'gateway': net.gateway,
         'dev': node.admin_interface.name
     }
Example #24
 def check_unique_hostnames(cls, nodes, cluster_id):
     hostnames = [node.hostname for node in nodes]
     conflicting_hostnames = [x[0] for x in db.query(
         Node.hostname).filter(
             sa.and_(
                 Node.hostname.in_(hostnames),
                 Node.cluster_id == cluster_id,
             )
         ).all()]
     if conflicting_hostnames:
         raise errors.AlreadyExists(
             "Nodes with hostnames [{0}] already exist in cluster {1}."
             .format(",".join(conflicting_hostnames), cluster_id)
         )
Example #25
 def validate_collection_update(cls, data, cluster_id=None):
     list_data = cls.validate_json(data)
     cls.validate_schema(list_data, unassignment_format_schema)
     node_ids_set = set(n['id'] for n in list_data)
     nodes = db.query(Node).filter(Node.id.in_(node_ids_set))
     node_id_cluster_map = dict(
         (n.id, n.cluster_id) for n in
         db.query(Node.id, Node.cluster_id).filter(
             Node.id.in_(node_ids_set)))
     other_cluster_ids_set = set(node_id_cluster_map.values()) - \
         set((int(cluster_id),))
     if other_cluster_ids_set:
         raise errors.InvalidData(
             u"Nodes [{0}] are not members of environment {1}."
             .format(
                 u", ".join(
                     str(n_id) for n_id, c_id in
                     node_id_cluster_map.iteritems()
                     if c_id in other_cluster_ids_set
                 ), cluster_id), log_message=True
         )
     cls.check_all_nodes(nodes, node_ids_set)
     return nodes
Example #26
 def _get_admin_node_network(cls, node_id):
     net = cls.get_admin_network_group()
     net_cidr = IPNetwork(net.cidr)
     node = db.query(Node).get(node_id)
     ip_addr = cls.get_admin_ip_for_node(node)
     return {
         'name': net.name,
         'vlan': net.vlan_start,
         'ip': "{0}/{1}".format(ip_addr, net_cidr.prefixlen),
         'netmask': str(net_cidr.netmask),
         'brd': str(net_cidr.broadcast),
         'gateway': net.gateway,
         'dev': node.admin_interface.name
     }
Example #27
    def get_objects_list_or_404(self, model, ids):
        """Get list of objects

        :param model: model object
        :param ids: list of ids

        :raises: web.notfound
        :returns: query object
        """
        node_query = db.query(model).filter(model.id.in_(ids))
        objects_count = node_query.count()

        if len(set(ids)) != objects_count:
            raise web.notfound('{0} not found'.format(model.__name__))

        return node_query
Example #28
    def get_objects_list_or_404(self, model, ids):
        """Get list of objects

        :param model: model object
        :param ids: list of ids

        :raises: web.notfound
        :returns: query object
        """
        node_query = db.query(model).filter(model.id.in_(ids))
        objects_count = node_query.count()

        if len(set(ids)) != objects_count:
            raise web.notfound('{0} not found'.format(model.__name__))

        return node_query
Example #29
    def validate_collection_update(cls, data, cluster_id=None):
        data = cls.validate_json(data)
        cls.validate_schema(data, assignment_format_schema)
        dict_data = dict((d["id"], d["roles"]) for d in data)
        received_node_ids = dict_data.keys()
        nodes = db.query(Node).filter(Node.id.in_(received_node_ids))
        cls.check_all_nodes(nodes, received_node_ids)
        cls.check_if_already_done(nodes)
        cluster = objects.Cluster.get_by_uid(
            cluster_id, fail_if_not_found=True
        )

        for node_id in received_node_ids:
            cls.validate_roles(
                cluster,
                dict_data[node_id]
            )
        return dict_data
Example #30
    def get_for_model(cls, instance):
        """Get deployment graphs related to given model.

        :param instance: model that could have relation to graph
        :type instance: models.Plugin|models.Cluster|models.Release|
        :return: graph instance
        :rtype: model.DeploymentGraph
        """
        association_model = cls.single.get_association_for_model(instance)
        graphs = db.query(
            models.DeploymentGraph
        ).join(
            association_model
        ).join(
            instance.__class__
        ).filter(
            instance.__class__.id == instance.id
        )
        return graphs.all()
Example #31
    def prepare_data(cls, data):
        """Prepares input data.

        Filter input data based on the fact that
        updating parameters of the fuel admin network
        is forbidden for default node group.

        Admin network cannot be updated because of:
        - sharing itself between environments;
        - having no mechanism to change its parameters on deployed Master node.
        """
        if data.get("networks"):
            default_admin = db.query(
                NetworkGroup).filter_by(group_id=None).first()
            data["networks"] = [
                n for n in data["networks"]
                if n.get("id") != default_admin.id
            ]

        return data
Example #32
    def copy_vips(orig_cluster, new_cluster):
        orig_vips = {}
        for ng in orig_cluster.network_groups:
            vips = db.query(models.IPAddr).filter(
                models.IPAddr.network == ng.id,
                models.IPAddr.node.is_(None),
                models.IPAddr.vip_type.isnot(None),
            ).all()
            orig_vips[ng.name] = list(vips)

        new_vips = []
        for ng in new_cluster.network_groups:
            orig_ng_vips = orig_vips.get(ng.name)
            for vip in orig_ng_vips:
                ip_addr = models.IPAddr(
                    network=ng.id,
                    ip_addr=vip.ip_addr,
                    vip_type=vip.vip_type,
                )
                new_vips.append(ip_addr)
        db.add_all(new_vips)
        db.commit()
Example #33
    def _validate_unique_name(cls, data, *filters):
        """Validate node group name to be unique.

        Validate whether node group name is unique for specific
        environment. Prevent to have duplicated node group names for
        the same environment.

        :param data: data which contains node group name and cluster_id.
        :param filters: additional filters for the query which is
                        used in the method for validation.
        :type data: dict
        :type filters: list
        :returns: None
        """
        nodegroup_query = objects.NodeGroupCollection.filter_by(
            None, name=data["name"], cluster_id=data["cluster_id"])
        if filters:
            nodegroup_query = nodegroup_query.filter(*filters)
        nodegroup_exists = db.query(nodegroup_query.exists()).scalar()
        if nodegroup_exists:
            raise errors.NotAllowed(
                "Node group '{0}' already exists " "in environment {1}.".format(data["name"], data["cluster_id"])
            )
Example #34
    def copy_vips(orig_cluster, new_cluster):
        orig_vips = {}
        for ng in orig_cluster.network_groups:
            vips = db.query(models.IPAddr).filter(
                models.IPAddr.network == ng.id,
                models.IPAddr.node.is_(None),
                models.IPAddr.vip_type.isnot(None),
            ).all()
            orig_vips[ng.name] = list(vips)

        new_vips = []
        for ng in new_cluster.network_groups:
            orig_ng_vips = orig_vips.get(ng.name)
            for vip in orig_ng_vips:
                ip_addr = models.IPAddr(
                    network=ng.id,
                    ip_addr=vip.ip_addr,
                    vip_type=vip.vip_type,
                )
                new_vips.append(ip_addr)
        db.add_all(new_vips)
        db.commit()
Example #35
    def _validate_unique_name(cls, data, *filters):
        """Validate node group name to be unique.

        Validate whether node group name is unique for specific
        environment. Prevent to have duplicated node group names for
        the same environment.

        :param data: data which contains node group name and cluster_id.
        :param filters: additional filters for the query which is
                        used in the method for validation.
        :type data: dict
        :type filters: list
        :returns: None
        """
        nodegroup_query = objects.NodeGroupCollection.filter_by(
            None, name=data['name'], cluster_id=data['cluster_id'])
        if filters:
            nodegroup_query = nodegroup_query.filter(*filters)
        nodegroup_exists = db.query(nodegroup_query.exists()).scalar()
        if nodegroup_exists:
            raise errors.NotAllowed("Node group '{0}' already exists "
                                    "in environment {1}.".format(
                                        data['name'], data['cluster_id']))
Example #36
    def test_upgrade_node_error_msg_to_allow_long_error_msg(self):
        nodes = self.meta.tables['nodes']
        self.assertIsInstance(nodes.columns['error_msg'].type, sa.Text)

        node_uuid = '26b508d0-0d76-4159-bce9-f67ec2765480'
        long_error_msg = ''.join('a' for i in range(500))

        db.execute(
            nodes.insert(),
            [{
                'uuid': node_uuid,
                'cluster_id': None,
                'group_id': None,
                'status': 'discover',
                'meta': '{}',
                'mac': 'aa:aa:aa:aa:aa:aa',
                'error_msg': long_error_msg,
                'timestamp': datetime.datetime.utcnow(),
            }]
        )

        node = db.query(nodes).filter_by(uuid=node_uuid).first()
        self.assertEqual(long_error_msg, node.error_msg)
Example #37
 def get_by_ids(cls, ids):
     return db.query(models.Node).filter(models.Node.id.in_(ids)).all()
Example #38
 def delete_by_network(cls, ip, network):
     db.query(models.IPAddr).filter(
         models.IPAddr.ip_addr == ip,
         models.IPAddr.network == network
     ).delete(synchronize_session='fetch')
Example #39
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (cluster successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()

        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data[u'status'])
            logger.warning(msg)
            raise error

        node = Node()
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by agent and POST requests don't contain
            # cluster_id, but our integration and unit tests widely use it.
            # We need to assign cluster first
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes("disks", node_id=node.id)
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   "info for node '{0}': '{1}'").format(
                       node.name or data.get("mac") or data.get("id"),
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node.id)
            network_manager.assign_networks_to_main_interface(node.id)

        try:
            # we use multiplier of 1024 because there are no problems here
            # with unfair size calculation
            ram = str(
                round(float(node.meta['memory']['total']) / 1073741824,
                      1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            hd_size = round(
                float(
                    sum([d["size"] for d in node.meta["disks"]]) / 1000000000),
                1)
            # if HDD > 100 GB we show it's size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify("discover",
                        "New node is discovered: %s CPUs / %s / %s " %
                        (cores, ram, hd_size),
                        node_id=node.id)
        raise web.webapi.created(json.dumps(NodeHandler.render(node),
                                            indent=4))
Example #40
 def delete_by_ids(cls, ids):
     fire_callback_on_node_collection_delete(ids)
     db.query(cls.single.model).filter(
         cls.single.model.id.in_(ids)).delete(synchronize_session=False)
Example #41
 def delete_by_ids(cls, ids):
     db.query(Node).filter(Node.id.in_(ids)).delete('fetch')
Example #42
    def test_downgrade_node_error_msg(self):
        nodes = self.meta.tables['nodes']
        self.assertIsInstance(nodes.columns['error_msg'].type, sa.String)

        node = db.query(nodes).filter_by(uuid=self.node_uuid).first()
        self.assertEqual(node.error_msg, self.long_error_msg[:255])
Example #43
 def is_cluster_in_upgrade(cls, cluster_id):
     query = cls._query_cluster_relations(cluster_id).exists()
     return db.query(query).scalar()
Example #44
 def _query_cluster_relations(cluster_id):
     return db.query(models.UpgradeRelation).filter(
         (models.UpgradeRelation.orig_cluster_id == cluster_id) |
         (models.UpgradeRelation.seed_cluster_id == cluster_id))
Example #45
 def _query_cluster_relations(cluster_id):
     return db.query(models.UpgradeRelation).filter(
         (models.UpgradeRelation.orig_cluster_id == cluster_id)
         | (models.UpgradeRelation.seed_cluster_id == cluster_id))
Example #46
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (cluster successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()

        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data.get(u'status'))
            logger.warning(msg)
            raise error

        node = Node(
            name="Untitled (%s)" % data['mac'][-5:],
            timestamp=datetime.now()
        )
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by agent and POST requests don't contain
            # cluster_id, but our integration and unit tests widely use it.
            # We need to assign cluster first
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes(
                    "disks",
                    node_id=node.id
                )
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                "info for node '{0}': '{1}'"
            ).format(
                node.name or data.get("mac") or data.get("id"),
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node)

        if node.cluster_id:
            network_manager = node.cluster.network_manager
            network_manager.assign_networks_by_default(node)
            network_manager.allow_network_assignment_to_all_interfaces(node)

        try:
            # we use multiplier of 1024 because there are no problems here
            # with unfair size calculation
            ram = str(round(float(
                node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            hd_size = round(float(
                sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
            # if HDD > 100 GB we show it's size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify(
            "discover",
            "New node is discovered: %s CPUs / %s / %s " %
            (cores, ram, hd_size),
            node_id=node.id
        )
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))
Example #47
 def is_cluster_in_upgrade(cls, cluster_id):
     query = cls._query_cluster_relations(cluster_id).exists()
     return db.query(query).scalar()
Example #48
 def delete_by_ids(cls, ids):
     fire_callback_on_node_collection_delete(ids)
     db.query(cls.single.model).filter(
         cls.single.model.id.in_(ids)).delete(synchronize_session=False)
Example #49
 def get_by_ids(cls, ids):
     return db.query(models.Node).filter(models.Node.id.in_(ids)).all()
Example #50
    def test_downgrade_node_error_msg(self):
        nodes = self.meta.tables['nodes']
        self.assertIsInstance(nodes.columns['error_msg'].type, sa.String)

        node = db.query(nodes).filter_by(uuid=self.node_uuid).first()
        self.assertEqual(node.error_msg, self.long_error_msg[:255])
Example #51
 def delete_by_network(cls, ip, network):
     db.query(
         models.IPAddr).filter(models.IPAddr.ip_addr == ip,
                               models.IPAddr.network == network).delete(
                                   synchronize_session='fetch')
Example #52
 def get_intersecting_ip(cls, instance, addr):
     """Get ip that intersects by ip_addr with given."""
     return db.query(cls.model).filter(
         cls.model.ip_addr == addr,
         cls.model.id != instance.id
     ).first()
Example #53
 def delete_by_ids(cls, ids):
     db.query(Node).filter(Node.id.in_(ids)).delete('fetch')
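Several of the later examples (#40, #41, #48, #53) delete rows in bulk with query.filter(Model.id.in_(ids)).delete(...). The sketch below reproduces that pattern in isolation; the Node model, engine, and session here are assumptions for illustration, not the project's code.

    from sqlalchemy import Column, Integer, create_engine
    from sqlalchemy.orm import declarative_base, sessionmaker

    Base = declarative_base()

    class Node(Base):
        __tablename__ = 'nodes'
        id = Column(Integer, primary_key=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    db = sessionmaker(bind=engine)()

    db.add_all([Node(id=i) for i in (1, 2, 3)])
    db.commit()

    # Emits one DELETE ... WHERE nodes.id IN (1, 3). 'fetch' selects the
    # matching primary keys first so deleted objects are expunged from the
    # session; synchronize_session=False skips that bookkeeping entirely.
    db.query(Node).filter(Node.id.in_([1, 3])).delete(
        synchronize_session='fetch')
    db.commit()

    print([n.id for n in db.query(Node).all()])  # [2]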