Code example #1
    def run(self):
        super(FakeDeletionThread, self).run()
        receiver = NailgunReceiver
        kwargs = {
            'task_uuid': self.task_uuid,
            'nodes': self.data['args']['nodes'],
            'status': 'ready'
        }
        nodes_to_restore = self.data['args'].get('nodes_to_restore', [])
        resp_method = getattr(receiver, self.respond_to)
        resp_method(**kwargs)

        for node_data in nodes_to_restore:
            node = Node(**node_data)

            # The offline node has just been deleted from the DB
            # and cannot be recreated with status 'discover'
            if not node.online:
                continue

            node.status = 'discover'
            db().add(node)
            db().commit()
            node.attributes = NodeAttributes(node_id=node.id)
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            NetworkManager.update_interfaces_info(node)
            db().commit()

            ram = round(node.meta.get('ram') or 0, 1)
            cores = node.meta.get('cores') or 'unknown'
            notifier.notify("discover",
                            "New node with %s CPU core(s) "
                            "and %s GB memory is discovered" %
                            (cores, ram), node_id=node.id)
Code example #2
File: node.py  Project: damjanek/fuelweb
    def POST(self):
        data = self.checked_data()

        node = Node()
        for key, value in data.iteritems():
            if key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)
        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        self.db.add(node)
        self.db.commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes(
                    "disks",
                    node_id=node.id
                )
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                "info for node '{0}': '{1}'"
            ).format(
                node.name or data.get("mac") or data.get("id"),
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        self.db.add(node)
        self.db.commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node.id)
            network_manager.assign_networks_to_main_interface(node.id)

        try:
            ram = str(round(float(
                node.meta['memory']['total']) / 1073741824, 1))
        except (KeyError, TypeError, ValueError):
            ram = "unknown"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify("discover",
                        "New node with %s CPU core(s) "
                        "and %s GB memory is discovered" %
                        (cores, ram), node_id=node.id)
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))
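
The RAM figure in this handler divides the byte count from node.meta['memory']['total'] by 1073741824 (1024**3) and falls back to "unknown" when the metadata is missing or malformed. A minimal standalone sketch of that conversion, assuming the same meta layout (the helper name and sample values are illustrative, not part of the project):

def ram_in_gb(meta):
    """Return the node's RAM in binary gigabytes as a string."""
    try:
        # 'total' is reported in bytes; 1073741824 == 1024 ** 3
        return str(round(float(meta['memory']['total']) / 1073741824, 1))
    except (KeyError, TypeError, ValueError):
        return "unknown"

print ram_in_gb({'memory': {'total': 17179869184}})  # prints 16.0
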
Code example #3
File: fixman.py  Project: mahak/fuelweb
def upload_fixture(fileobj):
    db.expunge_all()
    fixture = json.load(template_fixture(fileobj))

    queue = Queue.Queue()
    keys = {}

    for obj in fixture:
        pk = obj["pk"]
        model_name = obj["model"].split(".")[1]

        try:
            model = itertools.dropwhile(
                lambda m: not hasattr(models, m),
                [model_name.capitalize(),
                 "".join(map(lambda n: n.capitalize(), model_name.split("_")))]
            ).next()
        except StopIteration:
            raise Exception("Couldn't find model {0}".format(model_name))

        obj['model'] = getattr(models, capitalize_model_name(model_name))
        keys[obj['model'].__tablename__] = {}

        # Check if it's already uploaded
        obj_from_db = db.query(obj['model']).get(pk)
        if obj_from_db:
            logger.info("Fixture model '%s' with pk='%s' already"
                        " uploaded. Skipping", model_name, pk)
            continue
        queue.put(obj)

    pending_objects = []

    while True:
        try:
            obj = queue.get_nowait()
        except Queue.Empty:
            break

        new_obj = obj['model']()

        fk_fields = {}
        for field, value in obj["fields"].iteritems():
            f = getattr(obj['model'], field)
            impl = f.impl
            fk_model = None
            if hasattr(f.comparator.prop, "argument"):
                if hasattr(f.comparator.prop.argument, "__call__"):
                    fk_model = f.comparator.prop.argument()
                else:
                    fk_model = f.comparator.prop.argument.class_

            if fk_model:
                if value not in keys[fk_model.__tablename__]:
                    if obj not in pending_objects:
                        queue.put(obj)
                        pending_objects.append(obj)
                        continue
                    else:
                        logger.error(
                            u"Can't resolve foreign key "
                            "'{0}' for object '{1}'".format(
                                field,
                                obj["model"]
                            )
                        )
                        break
                else:
                    value = keys[fk_model.__tablename__][value].id

            if isinstance(impl, orm.attributes.ScalarObjectAttributeImpl):
                if value:
                    fk_fields[field] = (value, fk_model)
            elif isinstance(impl, orm.attributes.CollectionAttributeImpl):
                if value:
                    fk_fields[field] = (value, fk_model)
            elif isinstance(
                f.property.columns[0].type, sqlalchemy.types.DateTime
            ):
                if value:
                    setattr(
                        new_obj,
                        field,
                        datetime.strptime(value, "%d-%m-%Y %H:%M:%S")
                    )
                else:
                    setattr(
                        new_obj,
                        field,
                        datetime.now()
                    )
            else:
                setattr(new_obj, field, value)

        for field, data in fk_fields.iteritems():
            if isinstance(data[0], int):
                setattr(new_obj, field, db.query(data[1]).get(data[0]))
            elif isinstance(data[0], list):
                for v in data[0]:
                    getattr(new_obj, field).append(
                        db.query(data[1]).get(v)
                    )
        db.add(new_obj)
        db.commit()
        keys[obj['model'].__tablename__][obj["pk"]] = new_obj

        # UGLY HACK for testing
        if new_obj.__class__.__name__ == 'Node':
            new_obj.attributes = models.NodeAttributes()
            db.commit()
            new_obj.attributes.volumes = \
                new_obj.volume_manager.gen_volumes_info()
            network_manager = NetworkManager()
            network_manager.update_interfaces_info(new_obj.id)
            db.commit()
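
upload_fixture resolves references between fixture objects with a single queue: an object whose foreign key target has not been uploaded yet is put back on the queue once, and reported as an error if the dependency still has not appeared on the second visit. A toy sketch of that deferred-resolution pattern, using plain dicts with a hypothetical 'parent' key in place of SQLAlchemy models (like the original, each record is retried only once, so longer dependency chains can still fail):

import Queue

def resolve_in_dependency_order(records):
    queue = Queue.Queue()
    for rec in records:
        queue.put(rec)
    resolved = {}   # pk -> record, playing the role of `keys` above
    requeued = []   # playing the role of `pending_objects` above
    while True:
        try:
            rec = queue.get_nowait()
        except Queue.Empty:
            break
        parent = rec.get("parent")
        if parent is not None and parent not in resolved:
            if rec not in requeued:
                queue.put(rec)  # retry after the rest of the batch
                requeued.append(rec)
                continue
            # second failure: the original logs an error and skips
            raise ValueError("can't resolve parent %r" % parent)
        resolved[rec["pk"]] = rec
    return resolved

resolve_in_dependency_order([
    {"pk": 2, "parent": 1},
    {"pk": 1},
])
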
Code example #4
File: node.py  Project: loles/fuelweb
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(
            self.validator.validate_collection_update
        )

        network_manager = NetworkManager()
        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]
            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug(
                        "Node is already provisioning - "
                        "status not updated by agent"
                    )
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(
                            lambda d: d["type"] == "disk",
                            node.attributes.volumes
                        )
                    ),
                    "roles" in nd,
                    "cluster_id" in nd
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes(
                                "disks",
                                node_id=node.id
                            )
                    except Exception as exc:
                        msg = (
                            "Failed to generate volumes "
                            "info for node '{0}': '{1}'"
                        ).format(
                            node.name or data.get("mac") or data.get("id"),
                            str(exc) or "see logs for details"
                        )
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                if node.meta and 'interfaces' in node.meta:
                    # we won't update interfaces if data is invalid
                    network_manager.update_interfaces_info(node.id)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)
                    network_manager.assign_networks_to_main_interface(node)

        # we need eagerload everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('interfaces'),
            joinedload('interfaces.assigned_networks')).\
            filter(Node.id.in_([n.id for n in nodes_updated])).all()
        return self.render(nodes)
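
Outside of the 'provisioning' and 'deploying' states, this handler regenerates the volume layout whenever the agent-reported disk count disagrees with the stored volumes, or the request touches roles or cluster membership. The same condition restated as a standalone predicate, assuming the node and request-dict shapes used above (the function name is illustrative):

def needs_volume_regeneration(node, nd):
    # volumes of type 'disk' must match the disks the agent reported
    disk_mismatch = (
        "disks" in node.meta and
        len(node.meta["disks"]) !=
        len([v for v in node.attributes.volumes if v["type"] == "disk"])
    )
    return disk_mismatch or "roles" in nd or "cluster_id" in nd
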
Code example #5
File: node.py  Project: loles/fuelweb
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (node successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()

        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data[u'status'])
            logger.warning(msg)
            raise error

        node = Node()
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by agent and POST requests don't contain
            # cluster_id, but our integration and unit tests widely use it.
            # We need to assign cluster first
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes(
                    "disks",
                    node_id=node.id
                )
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                "info for node '{0}': '{1}'"
            ).format(
                node.name or data.get("mac") or data.get("id"),
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node)
            network_manager.assign_networks_to_main_interface(node)

        try:
            # we divide by 1024**3: unlike disks, RAM sizes are not
            # subject to the unfair decimal-vs-binary size discrepancy
            ram = str(round(float(
                node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            # float() wraps the sum so the division is true division
            # (Python 2 would otherwise floor-divide and drop fractions)
            hd_size = round(float(
                sum([d["size"] for d in node.meta["disks"]])) / 1000000000, 1)
            # if the HDD is over 100 GB we show its size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify(
            "discover",
            "New node is discovered: %s CPUs / %s / %s " %
            (cores, ram, hd_size),
            node_id=node.id
        )
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))
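
The disk total above is formatted with decimal (10**9) units because, per the cited sources, vendors quote HDD capacity in decimal gigabytes. A consolidated sketch of the same conversion; note that float() must wrap the sum before dividing, since in Python 2 an int/int division floor-divides and silently drops the fractional gigabytes:

def human_hd_size(disks):
    """disks: the node.meta['disks'] list; each entry has 'size' in bytes."""
    total_gb = round(float(sum(d["size"] for d in disks)) / 1000000000, 1)
    # over 100 GB we switch to terabytes, as in the handlers above
    if total_gb > 100:
        return str(total_gb / 1000) + " TB HDD"
    return str(total_gb) + " GB HDD"

print human_hd_size([{"size": 2000000000000}])  # prints 2.0 TB HDD
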
Code example #6
def upload_fixture(fileobj):
    db.expunge_all()
    fixture = json.load(
        template_fixture(fileobj)
    )

    queue = Queue.Queue()
    keys = {}

    for obj in fixture:
        pk = obj["pk"]
        model_name = obj["model"].split(".")[1]

        try:
            itertools.dropwhile(
                lambda m: not hasattr(models, m),
                [model_name.capitalize(),
                 "".join(map(lambda n: n.capitalize(), model_name.split("_")))]
            ).next()
        except StopIteration:
            raise Exception("Couldn't find model {0}".format(model_name))

        obj['model'] = getattr(models, capitalize_model_name(model_name))
        keys[obj['model'].__tablename__] = {}

        # Check if it's already uploaded
        obj_from_db = db.query(obj['model']).get(pk)
        if obj_from_db:
            logger.info("Fixture model '%s' with pk='%s' already"
                        " uploaded. Skipping", model_name, pk)
            continue
        queue.put(obj)

    pending_objects = []

    while True:
        try:
            obj = queue.get_nowait()
        except Exception:
            break

        new_obj = obj['model']()

        fk_fields = {}
        for field, value in obj["fields"].iteritems():
            f = getattr(obj['model'], field)
            impl = getattr(f, 'impl', None)
            fk_model = None
            try:
                if hasattr(f.comparator.prop, "argument"):
                    if hasattr(f.comparator.prop.argument, "__call__"):
                        fk_model = f.comparator.prop.argument()
                    else:
                        fk_model = f.comparator.prop.argument.class_
            except AttributeError:
                pass

            if fk_model:
                if value not in keys[fk_model.__tablename__]:
                    if obj not in pending_objects:
                        queue.put(obj)
                        pending_objects.append(obj)
                        continue
                    else:
                        logger.error(
                            u"Can't resolve foreign key "
                            "'{0}' for object '{1}'".format(
                                field,
                                obj["model"]
                            )
                        )
                        break
                else:
                    value = keys[fk_model.__tablename__][value].id

            if isinstance(impl, orm.attributes.ScalarObjectAttributeImpl):
                if value:
                    fk_fields[field] = (value, fk_model)
            elif isinstance(impl, orm.attributes.CollectionAttributeImpl):
                if value:
                    fk_fields[field] = (value, fk_model)
            elif hasattr(f, 'property') and isinstance(
                f.property.columns[0].type, sqlalchemy.types.DateTime
            ):
                if value:
                    setattr(
                        new_obj,
                        field,
                        datetime.strptime(value, "%d-%m-%Y %H:%M:%S")
                    )
                else:
                    setattr(
                        new_obj,
                        field,
                        datetime.now()
                    )
            else:
                setattr(new_obj, field, value)

        for field, data in fk_fields.iteritems():
            if isinstance(data[0], int):
                setattr(new_obj, field, db.query(data[1]).get(data[0]))
            elif isinstance(data[0], list):
                for v in data[0]:
                    getattr(new_obj, field).append(
                        db.query(data[1]).get(v)
                    )
        db.add(new_obj)
        db.commit()
        keys[obj['model'].__tablename__][obj["pk"]] = new_obj

        # UGLY HACK for testing
        if new_obj.__class__.__name__ == 'Node':
            new_obj.attributes = models.NodeAttributes()
            db.commit()
            new_obj.attributes.volumes = \
                new_obj.volume_manager.gen_volumes_info()
            network_manager = NetworkManager()
            network_manager.update_interfaces_info(new_obj)
            db.commit()
Code example #7
File: node.py  Project: 99cloud-net/fuelweb
    def POST(self):
        data = self.checked_data()

        node = Node()
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)
        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes(
                    "disks",
                    node_id=node.id
                )
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                "info for node '{0}': '{1}'"
            ).format(
                node.name or data.get("mac") or data.get("id"),
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node.id)
            network_manager.assign_networks_to_main_interface(node.id)

        try:
            # we divide by 1024**3: unlike disks, RAM sizes are not
            # subject to the unfair decimal-vs-binary size discrepancy
            ram = str(round(float(
                node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            # float() wraps the sum so the division is true division
            # (Python 2 would otherwise floor-divide and drop fractions)
            hd_size = round(float(
                sum([d["size"] for d in node.meta["disks"]])) / 1000000000, 1)
            # if the HDD is over 100 GB we show its size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify(
            "discover",
            "New node is discovered: %s CPUs / %s / %s " %
            (cores, ram, hd_size),
            node_id=node.id
        )
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))
Code example #8
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        network_manager = NetworkManager()
        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]
            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug("Node is already provisioning - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(lambda d: d["type"] == "disk",
                               node.attributes.volumes)
                    ),
                    "roles" in nd,
                    "cluster_id" in nd
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or data.get("mac")
                                   or data.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                if node.meta and 'interfaces' in node.meta:
                    # we won't update interfaces if data is invalid
                    network_manager.update_interfaces_info(node.id)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node.id)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node.id)
                    network_manager.assign_networks_to_main_interface(node.id)
        return map(NodeHandler.render, nodes_updated)
Code example #9
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (node successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()

        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data[u'status'])
            logger.warning(msg)
            raise error

        node = Node()
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by agent and POST requests don't contain
            # cluster_id, but our integration and unit tests widely use it.
            # We need to assign cluster first
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes("disks", node_id=node.id)
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   "info for node '{0}': '{1}'").format(
                       node.name or data.get("mac") or data.get("id"),
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node.id)
            network_manager.assign_networks_to_main_interface(node.id)

        try:
            # we divide by 1024**3: unlike disks, RAM sizes are not
            # subject to the unfair decimal-vs-binary size discrepancy
            ram = str(
                round(float(node.meta['memory']['total']) / 1073741824,
                      1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            # float() wraps the sum so the division is true division
            # (Python 2 would otherwise floor-divide and drop fractions)
            hd_size = round(
                float(
                    sum([d["size"] for d in node.meta["disks"]])) / 1000000000,
                1)
            # if the HDD is over 100 GB we show its size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify("discover",
                        "New node is discovered: %s CPUs / %s / %s " %
                        (cores, ram, hd_size),
                        node_id=node.id)
        raise web.webapi.created(json.dumps(NodeHandler.render(node),
                                            indent=4))
Code example #10
File: node.py  Project: mahak/fuelweb
    def POST(self):
        data = self.checked_data()

        node = Node()
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes("disks", node_id=node.id)
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   "info for node '{0}': '{1}'").format(
                       node.name or data.get("mac") or data.get("id"),
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node.id)
            network_manager.assign_networks_to_main_interface(node.id)

        try:
            # we divide by 1024**3: unlike disks, RAM sizes are not
            # subject to the unfair decimal-vs-binary size discrepancy
            ram = str(
                round(float(node.meta['memory']['total']) / 1073741824,
                      1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            # float() wraps the sum so the division is true division
            # (Python 2 would otherwise floor-divide and drop fractions)
            hd_size = round(
                float(
                    sum([d["size"] for d in node.meta["disks"]])) / 1000000000,
                1)
            # if the HDD is over 100 GB we show its size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify("discover",
                        "New node is discovered: %s CPUs / %s / %s " %
                        (cores, ram, hd_size),
                        node_id=node.id)
        raise web.webapi.created(json.dumps(NodeHandler.render(node),
                                            indent=4))
Code example #11
File: node.py  Project: tleontovich/fuel-web
    def PUT(self):
        """:returns: Collection of JSONized Node objects.
        :http: * 200 (nodes are successfully updated)
               * 400 (invalid nodes data specified)
        """
        data = self.checked_data(self.validator.validate_collection_update)

        q = db().query(Node)
        nodes_updated = []
        for nd in data:
            is_agent = nd.pop("is_agent") if "is_agent" in nd else False
            node = None
            if "mac" in nd:
                node = q.filter_by(mac=nd["mac"]).first() \
                    or self.validator.validate_existent_node_mac_update(nd)
            else:
                node = q.get(nd["id"])
            if is_agent:
                node.timestamp = datetime.now()
                if not node.online:
                    node.online = True
                    msg = u"Node '{0}' is back online".format(
                        node.human_readable_name)
                    logger.info(msg)
                    notifier.notify("discover", msg, node_id=node.id)
                db().commit()
            old_cluster_id = node.cluster_id

            # Choosing network manager
            if nd.get('cluster_id') is not None:
                cluster = db().query(Cluster).get(nd['cluster_id'])
            else:
                cluster = node.cluster

            if cluster and cluster.net_provider == "nova_network":
                network_manager = NetworkManager()
            elif cluster and cluster.net_provider == "neutron":
                network_manager = NeutronManager()
            # essential rollback - we can't avoid it now
            elif not cluster:
                network_manager = NetworkManager()
            # /Choosing network manager

            if nd.get("pending_roles") == [] and node.cluster:
                node.cluster.clear_pending_changes(node_id=node.id)

            if "cluster_id" in nd:
                if nd["cluster_id"] is None and node.cluster:
                    node.cluster.clear_pending_changes(node_id=node.id)
                    node.roles = node.pending_roles = []
                node.cluster_id = nd["cluster_id"]

            regenerate_volumes = any((
                'roles' in nd and
                set(nd['roles']) != set(node.roles),
                'pending_roles' in nd and
                set(nd['pending_roles']) != set(node.pending_roles),
                node.cluster_id != old_cluster_id
            ))

            for key, value in nd.iteritems():
                if is_agent and (key, value) == ("status", "discover") \
                        and node.status == "provisioning":
                    # We don't update provisioning back to discover
                    logger.debug("Node is already provisioning - "
                                 "status not updated by agent")
                    continue
                if key == "meta":
                    node.update_meta(value)
                else:
                    setattr(node, key, value)
            db().commit()
            if not node.attributes:
                node.attributes = NodeAttributes()
                db().commit()
            if not node.attributes.volumes:
                node.attributes.volumes = \
                    node.volume_manager.gen_volumes_info()
                db().commit()
            if node.status not in ('provisioning', 'deploying'):
                variants = (
                    "disks" in node.meta and
                    len(node.meta["disks"]) != len(
                        filter(lambda d: d["type"] == "disk",
                               node.attributes.volumes)
                    ),
                    regenerate_volumes
                )
                if any(variants):
                    try:
                        node.attributes.volumes = \
                            node.volume_manager.gen_volumes_info()
                        if node.cluster:
                            node.cluster.add_pending_changes("disks",
                                                             node_id=node.id)
                    except Exception as exc:
                        msg = ("Failed to generate volumes "
                               "info for node '{0}': '{1}'").format(
                                   node.name or data.get("mac")
                                   or data.get("id"),
                                   str(exc) or "see logs for details")
                        logger.warning(traceback.format_exc())
                        notifier.notify("error", msg, node_id=node.id)

                db().commit()
            if is_agent:
                # Update node's NICs.
                network_manager.update_interfaces_info(node)

            nodes_updated.append(node)
            db().commit()
            if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
                if old_cluster_id:
                    network_manager.clear_assigned_networks(node)
                    network_manager.clear_all_allowed_networks(node.id)
                if nd['cluster_id']:
                    network_manager.assign_networks_by_default(node)
                    network_manager.allow_network_assignment_to_all_interfaces(
                        node)

        # we need eagerload everything that is used in render
        nodes = db().query(Node).options(
            joinedload('cluster'),
            joinedload('interfaces'),
            joinedload('interfaces.assigned_networks')).\
            filter(Node.id.in_([n.id for n in nodes_updated])).all()
        return self.render(nodes)
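
Unlike the earlier forks, this one picks the network manager per cluster: NeutronManager for clusters with the 'neutron' provider and NetworkManager otherwise, including the cluster-less fallback. The same selection condensed into a helper (illustrative name; the sketch also folds the unhandled "other provider" case into the NetworkManager fallback, which the branch above leaves unbound):

def choose_network_manager(cluster):
    # neutron clusters get the Neutron-aware manager; nova_network
    # clusters and cluster-less nodes fall back to NetworkManager
    if cluster and cluster.net_provider == "neutron":
        return NeutronManager()
    return NetworkManager()
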
Code example #12
File: node.py  Project: rakeshkk/fuelweb
    def POST(self):
        data = self.checked_data()

        node = Node()
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)
        node.name = "Untitled (%s)" % data['mac'][-5:]
        node.timestamp = datetime.now()
        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes(
                    "disks",
                    node_id=node.id
                )
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                "info for node '{0}': '{1}'"
            ).format(
                node.name or data.get("mac") or data.get("id"),
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager()
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node.id)

        if node.cluster_id:
            network_manager.allow_network_assignment_to_all_interfaces(node.id)
            network_manager.assign_networks_to_main_interface(node.id)

        try:
            ram = str(round(float(
                node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # divide by 10**9: disk vendors quote capacity in decimal units;
            # float() wraps the sum so Python 2 doesn't floor-divide
            hd_size = round(float(
                sum([d["size"] for d in node.meta["disks"]])) / 1000000000, 1)
            # if the HDD is over 100 GB we show its size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify(
            "discover",
            "New node is discovered: %s CPUs / %s / %s " %
            (cores, ram, hd_size),
            node_id=node.id
        )
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))