Example #1
    def run(self):
        super(FakeDeletionThread, self).run()
        receiver = NailgunReceiver
        kwargs = {
            'task_uuid': self.task_uuid,
            'nodes': self.data['args']['nodes'],
            'status': 'ready'
        }
        nodes_to_restore = self.data['args'].get('nodes_to_restore', [])
        resp_method = getattr(receiver, self.respond_to)
        resp_method(**kwargs)

        recover_nodes = self.params.get("recover_nodes", True)

        if not recover_nodes:
            return

        for node_data in nodes_to_restore:
            node = Node(**node_data)

            # An offline node has just been deleted from the db
            # and cannot be recreated with 'discover' status
            if not node.online:
                continue

            node.status = 'discover'
            db().add(node)
            db().commit()
            node.attributes = NodeAttributes(node_id=node.id)
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            NetworkManager.update_interfaces_info(node)
            db().commit()

            ram = round(node.meta.get('ram') or 0, 1)
            cores = node.meta.get('cores') or 'unknown'
            notifier.notify("discover",
                            "New node with %s CPU core(s) "
                            "and %s GB memory is discovered" %
                            (cores, ram), node_id=node.id)
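
The loop body above is one restore recipe per node: recreate it with 'discover' status, attach a fresh NodeAttributes, regenerate volumes, refresh interface info, and commit. A minimal sketch of that recipe as a standalone helper, assuming the same nailgun objects used in the snippet; restore_discovered_node is a hypothetical name, not part of fuel-web:

def restore_discovered_node(node_data):
    # Hypothetical helper; it mirrors the loop body above and assumes the
    # nailgun objects Node, NodeAttributes, NetworkManager, db and notifier.
    node = Node(**node_data)
    if not node.online:
        # An offline node stays deleted; it cannot come back as 'discover'.
        return None
    node.status = 'discover'
    db().add(node)
    db().commit()
    node.attributes = NodeAttributes(node_id=node.id)
    node.attributes.volumes = node.volume_manager.gen_volumes_info()
    NetworkManager.update_interfaces_info(node)
    db().commit()
    return node
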
Example #2
    def create_attributes(self):
        return NodeAttributes()
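
The other examples use this factory output in a recurring pattern: attach a fresh NodeAttributes to a node, generate its volume layout, and commit. A minimal sketch of that pattern, assuming the nailgun models and the db() session helper visible in the surrounding snippets; attach_default_attributes is a hypothetical name:

def attach_default_attributes(node):
    # Hypothetical helper; assumes the nailgun NodeAttributes model, the
    # db() session helper, and node.volume_manager seen in these examples.
    if not node.attributes:
        node.attributes = NodeAttributes(node_id=node.id)
    if not node.attributes.volumes:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
    db().add(node)
    db().commit()
    return node.attributes
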
Example #3
File: node.py Project: tsipa/fuel-web
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (node successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()
        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data.get(u'status'))
            logger.warning(msg)
            raise error
        node = Node(name="Untitled (%s)" % data['mac'][-5:],
                    timestamp=datetime.now())
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by the agent and POST requests don't
            # contain cluster_id, but our integration and unit tests use it
            # widely. We need to assign the cluster first.
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes("disks", node_id=node.id)
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   "info for node '{0}': '{1}'").format(
                       node.name or data.get("mac") or data.get("id"),
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node)

        if node.cluster_id:
            network_manager = node.cluster.network_manager
            network_manager.assign_networks_by_default(node)

        try:
            # we use a multiplier of 1024 because memory is reported in
            # binary units, so there is no unfair size calculation here
            ram = str(
                round(float(node.meta['memory']['total']) / 1073741824,
                      1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use a multiplier of 1000 because disk vendors specify HDD
            # sizes in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            hd_size = round(
                float(sum([d["size"] for d in node.meta["disks"]])) /
                1000000000, 1)
            # if the HDD is larger than 100 GB, show its size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify("discover",
                        "New node is discovered: %s CPUs / %s / %s " %
                        (cores, ram, hd_size),
                        node_id=node.id)
        raise web.webapi.created(json.dumps(NodeHandler.render(node),
                                            indent=4))
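
The RAM/HDD formatting above boils down to two unit conversions: binary (1024-based) gigabytes for memory and decimal (1000-based) gigabytes for disks, switching to TB above 100 GB. A self-contained sketch of that arithmetic; the helper names are hypothetical and not part of fuel-web:

def human_ram(total_bytes):
    # Memory uses binary units: 1 GB here is 1024 ** 3 = 1073741824 bytes.
    return "%s GB RAM" % round(float(total_bytes) / 1024 ** 3, 1)


def human_hdd(disk_sizes_bytes):
    # Disk vendors use decimal capacity: 1 GB is 10 ** 9 bytes.
    size_gb = round(float(sum(disk_sizes_bytes)) / 10 ** 9, 1)
    if size_gb > 100:
        return "%s TB HDD" % (size_gb / 1000)
    return "%s GB HDD" % size_gb


print(human_ram(8589934592))                      # 8.0 GB RAM
print(human_hdd([500107862016, 1000204886016]))   # roughly 1.5 TB HDD
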
Example #4
File: node.py Project: tsipa/fuel-web
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            node = None
            if nd.get("mac"):
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])

            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()

            old_cluster_id = node.cluster_id

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                    node.reset_name_to_default()
                node.cluster_id = nd["cluster_id"]

            regenerate_volumes = any((
                'roles' in nd and set(nd['roles']) != set(node.roles),
                'pending_roles' in nd and
                set(nd['pending_roles']) != set(node.pending_roles),
                node.cluster_id != old_cluster_id
            ))

            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status in ('provisioning', 'error'):
                    # We don't update provisioning and error back to discover
                    logger.debug("Node has provisioning or error status - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                # don't update node ID
                elif key != "id":
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(lambda d: d["type"] == "disk",
                               node.attributes.volumes)),
                    regenerate_volumes
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or nd.get("mac")
                                   or nd.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()

            network_manager = NetworkManager

            if is_agent:
                # Update node's NICs.
                network_manager.update_interfaces_info(node)
                db().commit()

            nodes_updated.append(node.id)
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                if node.cluster:
                    network_manager = node.cluster.network_manager
                    network_manager.assign_networks_by_default(node)

        # we need to eager-load everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('nic_interfaces'),
            joinedload('nic_interfaces.assigned_networks_list'),
            joinedload('bond_interfaces'),
            joinedload('bond_interfaces.assigned_networks_list')).\
            filter(Node.id.in_(nodes_updated)).all()
        return self.render(nodes)
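
The regenerate_volumes condition used by both PUT handlers reduces to a pure predicate: the role set changed, the pending-role set changed, or the node moved to another cluster. A self-contained sketch of that check; the function name is hypothetical:

def volumes_need_regeneration(update, current_roles, current_pending_roles,
                              old_cluster_id, new_cluster_id):
    # Any role change, pending-role change, or cluster reassignment
    # forces the volume layout to be regenerated.
    return any((
        'roles' in update and
        set(update['roles']) != set(current_roles),
        'pending_roles' in update and
        set(update['pending_roles']) != set(current_pending_roles),
        new_cluster_id != old_cluster_id,
    ))


# Assigning a new pending role is enough to trigger regeneration.
print(volumes_need_regeneration(
    {'pending_roles': ['compute']}, ['controller'], [], 1, 1))   # True
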
Example #5
File: node.py Project: e0ne/fuel-web
    def PUT(self, node_id):
        """:returns: JSONized Node object.
        :http: * 200 (OK)
               * 400 (invalid node data specified)
               * 404 (node not found in db)
        """
        node = self.get_object_or_404(Node, node_id)
        if not node.attributes:
            node.attributes = NodeAttributes(node_id=node.id)

        data = self.checked_data(self.validator.validate_update)

        network_manager = NetworkManager

        old_cluster_id = node.cluster_id

        if data.get("pending_roles") == [] and node.cluster:
            node.cluster.clear_pending_changes(node_id=node.id)

        if "cluster_id" in data:
            if data["cluster_id"] is None and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)
                node.roles = node.pending_roles = []
            node.cluster_id = data["cluster_id"]
            if node.cluster_id != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if node.cluster_id:
                    network_manager = node.cluster.network_manager
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node
                    )

        regenerate_volumes = any((
            'roles' in data and set(data['roles']) != set(node.roles),
            'pending_roles' in data and
            set(data['pending_roles']) != set(node.pending_roles),
            node.cluster_id != old_cluster_id
        ))

        for key, value in data.iteritems():
            # we don't allow id to be updated explicitly, and cluster_id
            # has already been handled above, before all other fields
            if key in ("id", "cluster_id"):
                continue
            setattr(node, key, value)

        if node.status not in ('provisioning', 'deploying') \
                and regenerate_volumes:
            try:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
            except Exception as exc:
                msg = (
                    u"Failed to generate volumes "
                    "info for node '{0}': '{1}'"
                ).format(
                    node.name or data.get("mac") or data.get("id"),
                    str(exc) or "see logs for details"
                )
                logger.warning(traceback.format_exc())
                notifier.notify("error", msg, node_id=node.id)
        db().commit()
        return self.render(node)
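
The cluster-reassignment branch above pairs two steps: tear down network state tied to the old cluster, then apply the new cluster's defaults. A minimal sketch, assuming the NetworkManager methods shown in the snippet; reassign_node_networks is a hypothetical helper:

def reassign_node_networks(node, old_cluster_id):
    # Hypothetical helper; uses only the NetworkManager calls shown above.
    if old_cluster_id:
        NetworkManager.clear_assigned_networks(node)
        NetworkManager.clear_all_allowed_networks(node.id)
    if node.cluster_id:
        manager = node.cluster.network_manager
        manager.assign_networks_by_default(node)
        manager.allow_network_assignment_to_all_interfaces(node)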