Example #1
    def run(self):
        super(FakeDeletionThread, self).run()
        receiver = NailgunReceiver
        kwargs = {
            'task_uuid': self.task_uuid,
            'nodes': self.data['args']['nodes'],
            'status': 'ready'
        }
        nodes_to_restore = self.data['args'].get('nodes_to_restore', [])
        resp_method = getattr(receiver, self.respond_to)
        resp_method(**kwargs)

        for node_data in nodes_to_restore:
            node = Node(**node_data)

            # An offline node has just been deleted from the db
            # and cannot be recreated with the 'discover' status
            if not node.online:
                continue

            node.status = 'discover'
            db().add(node)
            db().commit()
            node.attributes = NodeAttributes(node_id=node.id)
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            NetworkManager.update_interfaces_info(node)
            db().commit()

            ram = round(node.meta.get('ram') or 0, 1)
            cores = node.meta.get('cores') or 'unknown'
            notifier.notify("discover",
                            "New node with %s CPU core(s) "
                            "and %s GB memory is discovered" %
                            (cores, ram), node_id=node.id)
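
For orientation, the sketch below shows an illustrative shape of the task data this fake thread consumes. The keys mirror what the code reads (self.data['args']['nodes'], 'nodes_to_restore', node.online, node.meta); every concrete value is invented.

# Hypothetical payload for FakeDeletionThread; values are made up.
data = {
    'args': {
        'nodes': [{'uid': '1'}],                # echoed back to the receiver
        'nodes_to_restore': [{                  # kwargs for Node(**node_data)
            'mac': 'aa:bb:cc:dd:ee:01',
            'online': True,                     # offline nodes are skipped
            'meta': {'ram': 4.0, 'cores': 2},   # used in the notification text
        }],
    },
}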
Example #2
    def test_session_update(self):
        node = Node()
        node.mac = u"ASDFGHJKLMNOPR"
        node.timestamp = datetime.now()
        self.db.add(node)
        self.db.commit()

        node2 = self.db2.query(Node).filter(Node.id == node.id).first()
        node2.mac = u"12345678"
        self.db2.add(node2)
        self.db2.commit()
        self.db.query(Node).filter(Node.id == node.id).first()
        self.assertEqual(node.mac, u"12345678")
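
The assertion above only passes because self.db and self.db2 are two separate sessions over the same database: the first session's commit() expires the loaded node, so the final query reloads the row that the second session has already updated. A minimal sketch of that assumed setup (database URL and names are placeholders, not from the source):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///nodes_test.db')   # placeholder file-based database
Session = sessionmaker(bind=engine)                 # expire_on_commit=True by default
db, db2 = Session(), Session()                      # analogous to self.db and self.db2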
Example #3
    def test_session_update(self):
        node = Node()
        node.mac = "aa:bb:cc:dd:ff:11"
        node.timestamp = datetime.now()
        self.db.add(node)
        self.db.commit()

        node2 = self.db2.query(Node).filter(Node.id == node.id).first()
        node2.mac = "aa:bb:cc:dd:ff:11"
        self.db2.add(node2)
        self.db2.commit()
        self.db.query(Node).filter(Node.id == node.id).first()
        self.assertEqual(node.mac, "aa:bb:cc:dd:ff:22")
Example #4
    def test_session_update(self):
        node = Node()
        node.mac = u"ASDFGHJKLMNOPR"
        node.timestamp = datetime.now()
        self.db.add(node)
        self.db.commit()

        node2 = self.db2.query(Node).filter(
            Node.id == node.id
        ).first()
        node2.mac = u"12345678"
        self.db2.add(node2)
        self.db2.commit()
        self.db.query(Node).filter(
            Node.id == node.id
        ).first()
        self.assertEqual(node.mac, u"12345678")
Example #5
    def test_session_update(self):
        node = Node()
        node.mac = "aa:bb:cc:dd:ff:11"
        node.timestamp = datetime.now()
        self.db.add(node)
        self.db.commit()

        node2 = self.db2.query(Node).filter(
            Node.id == node.id
        ).first()
        node2.mac = "aa:bb:cc:dd:ff:11"
        self.db2.add(node2)
        self.db2.commit()
        self.db.query(Node).filter(
            Node.id == node.id
        ).first()
        self.assertEqual(node.mac, "aa:bb:cc:dd:ff:22")
Example #6
    def execute(self, nodes, mclient_remove=True):
        cluster_id = None
        if hasattr(self, 'cluster'):
            cluster_id = self.cluster.id
            objects.TaskCollection.lock_cluster_tasks(cluster_id)

        logger.info("Trying to execute node deletion task with nodes %s",
                    ', '.join(str(node.id) for node in nodes))

        self.verify_nodes_with_cluster(nodes)
        objects.NodeCollection.lock_nodes(nodes)

        if cluster_id is None:
            # DeletionTask operates on a cluster's nodes.
            # Nodes that are not in a cluster are simply deleted.

            Node.delete_by_ids([n.id for n in nodes])
            db().flush()

            task = Task(name=consts.TASK_NAMES.node_deletion,
                        progress=100,
                        status=consts.TASK_STATUSES.ready)
            db().add(task)
            db().flush()

            return task

        task = Task(name=consts.TASK_NAMES.node_deletion,
                    cluster=self.cluster)
        db().add(task)
        for node in nodes:
            objects.Node.update(node,
                                {'status': consts.NODE_STATUSES.removing})
        db().flush()

        self._call_silently(
            task,
            tasks.DeletionTask,
            nodes=tasks.DeletionTask.prepare_nodes_for_task(
                nodes, mclient_remove=mclient_remove))

        return task
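
A hedged sketch of a call site for this method; the manager class name, its constructor argument, and the cluster/nodes objects are assumptions for illustration, not taken from the example:

# Hypothetical usage; NodeDeletionTaskManager and cluster_id are assumptions.
manager = NodeDeletionTaskManager(cluster_id=cluster.id)
task = manager.execute(cluster.nodes, mclient_remove=False)
assert task.name == consts.TASK_NAMES.node_deletion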
Example #7
    def execute(self, nodes, mclient_remove=True):
        cluster_id = None
        if hasattr(self, 'cluster'):
            cluster_id = self.cluster.id
            objects.TaskCollection.lock_cluster_tasks(cluster_id)

        logger.info("Trying to execute node deletion task with nodes %s",
                    ', '.join(str(node.id) for node in nodes))

        self.verify_nodes_with_cluster(nodes)
        objects.NodeCollection.lock_nodes(nodes)

        if cluster_id is None:
            # DeletionTask operates on a cluster's nodes.
            # Nodes that are not in a cluster are simply deleted.

            Node.delete_by_ids([n.id for n in nodes])
            db().flush()

            task = Task(name=consts.TASK_NAMES.node_deletion,
                        progress=100,
                        status=consts.TASK_STATUSES.ready)
            db().add(task)
            db().flush()

            return task

        task = Task(name=consts.TASK_NAMES.node_deletion, cluster=self.cluster)
        db().add(task)
        for node in nodes:
            objects.Node.update(node,
                                {'status': consts.NODE_STATUSES.removing})
        db().flush()

        self._call_silently(task,
                            tasks.DeletionTask,
                            nodes=tasks.DeletionTask.prepare_nodes_for_task(
                                nodes, mclient_remove=mclient_remove))

        return task
Example #8
    def remove_nodes_resp(cls, **kwargs):
        logger.info(
            "RPC method remove_nodes_resp received: %s" %
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes') or []
        error_nodes = kwargs.get('error_nodes') or []
        inaccessible_nodes = kwargs.get('inaccessible_nodes') or []
        error_msg = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')
        if status in [consts.TASK_STATUSES.ready, consts.TASK_STATUSES.error]:
            progress = 100

        # locking tasks on cluster
        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
        objects.TaskCollection.lock_cluster_tasks(task.cluster_id)
        task = objects.Task.get_by_uuid(
            task_uuid,
            fail_if_not_found=True,
            lock_for_update=True
        )

        # locking cluster
        if task.cluster_id is not None:
            objects.Cluster.get_by_uid(
                task.cluster_id,
                fail_if_not_found=True,
                lock_for_update=True
            )

        # locking nodes
        all_nodes = itertools.chain(nodes, error_nodes, inaccessible_nodes)
        all_nodes_ids = [
            node['id'] if 'id' in node else node['uid']
            for node in all_nodes
        ]
        locked_nodes = objects.NodeCollection.filter_by_list(
            None,
            'id',
            all_nodes_ids,
            order_by='id'
        )
        objects.NodeCollection.lock_for_update(locked_nodes).all()

        def get_node_id(n):
            return n.get('id', int(n.get('uid')))

        Node.delete_by_ids([get_node_id(n) for n in nodes])

        if inaccessible_nodes:
            inaccessible_node_ids = [
                get_node_id(n) for n in inaccessible_nodes]

            logger.warn(u'Nodes %s not answered by RPC, removing from db',
                        inaccessible_nodes)

            Node.delete_by_ids(inaccessible_node_ids)

        for node in error_nodes:
            node_db = objects.Node.get_by_uid(node['uid'])
            if not node_db:
                logger.error(
                    u"Failed to delete node '%s' marked as error from Astute:"
                    " node doesn't exist", str(node)
                )
            else:
                node_db.pending_deletion = False
                node_db.status = 'error'
                db().add(node_db)
                node['name'] = node_db.name
        db().flush()

        success_msg = u"No nodes were removed"
        err_msg = u"No errors occurred"
        if nodes:
            success_msg = u"Successfully removed {0} node(s)".format(
                len(nodes)
            )
            notifier.notify("done", success_msg)
        if error_nodes:
            err_msg = u"Failed to remove {0} node(s): {1}".format(
                len(error_nodes),
                ', '.join(
                    [n.get('name') or "ID: {0}".format(n['uid'])
                        for n in error_nodes])
            )
            notifier.notify("error", err_msg)
        if not error_msg:
            error_msg = ". ".join([success_msg, err_msg])
        data = {
            'status': status,
            'progress': progress,
            'message': error_msg,
        }
        objects.Task.update(task, data)

        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
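
The kwargs.get() calls above imply a payload roughly like the sketch below; the field names come from the handler, while every value is illustrative:

# Illustrative RPC payload; all values are invented.
payload = {
    'task_uuid': 'deadbeef-0000-0000-0000-000000000000',
    'status': consts.TASK_STATUSES.ready,    # forces progress to 100
    'progress': 90,
    'nodes': [{'uid': '1'}],                 # deleted from the db
    'error_nodes': [],                       # would be kept with status 'error'
    'inaccessible_nodes': [{'uid': '2'}],    # deleted with a warning
}
NailgunReceiver.remove_nodes_resp(**payload)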
Example #9
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (node successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()

        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data.get(u'status'))
            logger.warning(msg)
            raise error

        node = Node(
            name="Untitled (%s)" % data['mac'][-5:],
            timestamp=datetime.now()
        )
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by agent and POST requests don't contain
            # cluster_id, but our integration and unit tests widely use it.
            # We need to assign cluster first
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes(
                    "disks",
                    node_id=node.id
                )
        except Exception as exc:
            msg = (
                u"Failed to generate volumes "
                "info for node '{0}': '{1}'"
            ).format(
                node.name or data.get("mac") or data.get("id"),
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node)

        if node.cluster_id:
            network_manager = node.cluster.network_manager
            network_manager.assign_networks_by_default(node)
            network_manager.allow_network_assignment_to_all_interfaces(node)

        try:
            # we use a multiplier of 1024 (1073741824 = 1024 ** 3) because,
            # unlike disk sizes, RAM is not subject to the decimal-vs-binary
            # capacity confusion described below
            ram = str(round(float(
                node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            hd_size = round(float(
                sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
            # if the total HDD size is > 100 GB, show it in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify(
            "discover",
            "New node is discovered: %s CPUs / %s / %s " %
            (cores, ram, hd_size),
            node_id=node.id
        )
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))
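
For reference, a request body that passes this handler's checks might look like the sketch below; the keys mirror what the code reads (mac, status, and the meta sections used for the notification), and all values are invented:

# Illustrative agent registration payload; values are made up.
node_data = {
    'mac': 'aa:bb:cc:dd:ee:01',
    'status': 'discover',                  # anything else is rejected with 403
    'meta': {
        'memory': {'total': 4294967296},   # bytes -> "4.0 GB RAM"
        'disks': [{'size': 80000000000}],  # bytes -> "80.0 GB HDD"
        'cpu': {'total': 2},
        'interfaces': [{'mac': 'aa:bb:cc:dd:ee:01'}],
    },
}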
Example #10
File: base.py Project: tsipa/fuel-web
    def create_node(self,
                    api=False,
                    exclude=None,
                    expect_http=201,
                    expect_message=None,
                    **kwargs):
        #TODO(alekseyk) Simplify 'interfaces' and 'mac' manipulation logic
        metadata = kwargs.get('meta')
        default_metadata = self.default_metadata()
        if metadata:
            default_metadata.update(metadata)
            meta_ifaces = 'interfaces' in metadata

        mac = kwargs.get('mac', self._generate_random_mac())
        if default_metadata['interfaces']:
            default_metadata['interfaces'][0]['mac'] = mac
            if not metadata or not meta_ifaces:
                for iface in default_metadata['interfaces'][1:]:
                    if 'mac' in iface:
                        iface['mac'] = self._generate_random_mac()

        node_data = {
            'mac': mac,
            'status': 'discover',
            'meta': default_metadata
        }
        if kwargs:
            meta = kwargs.pop('meta', None)
            node_data.update(kwargs)
            if meta:
                kwargs['meta'] = meta

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del node_data[ex]
                except KeyError as err:
                    logger.warning(err)
        if api:
            resp = self.app.post(reverse('NodeCollectionHandler'),
                                 json.dumps(node_data),
                                 headers=self.default_headers,
                                 expect_errors=True)
            self.tester.assertEquals(resp.status, expect_http)
            if expect_message:
                self.tester.assertEquals(resp.body, expect_message)
            if str(expect_http)[0] != "2":
                return None
            self.tester.assertEquals(resp.status, expect_http)
            node = json.loads(resp.body)
            node_db = self.db.query(Node).get(node['id'])
            if 'interfaces' not in node_data['meta'] \
                    or not node_data['meta']['interfaces']:
                self._set_interfaces_if_not_set_in_meta(
                    node_db.id, kwargs.get('meta', None))
            self.nodes.append(node_db)
        else:
            node = Node()
            node.timestamp = datetime.now()
            if 'cluster_id' in node_data:
                cluster_id = node_data.pop('cluster_id')
                for cluster in self.clusters:
                    if cluster.id == cluster_id:
                        node.cluster = cluster
                        break
                else:
                    node.cluster_id = cluster_id
            for key, value in node_data.iteritems():
                setattr(node, key, value)
            node.attributes = self.create_attributes()
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            self.db.add(node)
            self.db.commit()
            if node.meta and node.meta.get('interfaces'):
                self._create_interfaces_from_meta(node)

            self.nodes.append(node)

        return node
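
A hedged usage example for this helper; the self.env attribute and the pre-existing cluster object are assumptions about how the test environment is wired, not part of the example:

# Hypothetical test usage; self.env and cluster are assumptions.
node = self.env.create_node(
    api=True,
    expect_http=201,
    mac='aa:bb:cc:dd:ee:02',
)
node_db = self.env.create_node(api=False, cluster_id=cluster.id)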
Example #11
File: base.py Project: e0ne/fuel-web
    def create_node(
            self, api=False,
            exclude=None, expect_http=201,
            expect_message=None,
            **kwargs):
        metadata = kwargs.get('meta')
        default_metadata = self.default_metadata()
        if metadata:
            default_metadata.update(metadata)

        mac = self._generate_random_mac()
        if default_metadata['interfaces']:
            default_metadata['interfaces'][0]['mac'] = kwargs.get('mac', mac)

        node_data = {
            'mac': mac,
            'status': 'discover',
            'meta': default_metadata
        }
        if kwargs:
            meta = kwargs.pop('meta', None)
            node_data.update(kwargs)
            if meta:
                kwargs['meta'] = meta

        if exclude and isinstance(exclude, list):
            for ex in exclude:
                try:
                    del node_data[ex]
                except KeyError as err:
                    logger.warning(err)
        if api:
            resp = self.app.post(
                reverse('NodeCollectionHandler'),
                json.dumps(node_data),
                headers=self.default_headers,
                expect_errors=True
            )
            self.tester.assertEquals(resp.status, expect_http)
            if expect_message:
                self.tester.assertEquals(resp.body, expect_message)
            if str(expect_http)[0] != "2":
                return None
            self.tester.assertEquals(resp.status, expect_http)
            node = json.loads(resp.body)
            node_db = self.db.query(Node).get(node['id'])
            self._set_interfaces_if_not_set_in_meta(
                node_db.id,
                kwargs.get('meta', None))
            self.nodes.append(node_db)
        else:
            node = Node()
            node.timestamp = datetime.now()
            if 'cluster_id' in node_data:
                cluster_id = node_data.pop('cluster_id')
                for cluster in self.clusters:
                    if cluster.id == cluster_id:
                        node.cluster = cluster
                        break
                else:
                    node.cluster_id = cluster_id
            for key, value in node_data.iteritems():
                setattr(node, key, value)
            node.attributes = self.create_attributes()
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            self.db.add(node)
            self.db.commit()
            if node.meta and node.meta.get('interfaces'):
                self._create_interfaces_from_meta(node)

            self.nodes.append(node)

        return node
Example #12
    def execute(self, nodes_to_delete, mclient_remove=True):
        cluster_id = None
        if hasattr(self, 'cluster'):
            cluster_id = self.cluster.id
            objects.TaskCollection.lock_cluster_tasks(cluster_id)

        logger.info("Trying to execute node deletion task with nodes %s",
                    ', '.join(str(node.id) for node in nodes_to_delete))

        self.verify_nodes_with_cluster(nodes_to_delete)
        objects.NodeCollection.lock_nodes(nodes_to_delete)

        if cluster_id is None:
            # DeletionTask operates on a cluster's nodes.
            # Nodes that are not in a cluster are simply deleted.

            Node.delete_by_ids([n.id for n in nodes_to_delete])
            db().flush()

            task = Task(name=consts.TASK_NAMES.node_deletion,
                        progress=100,
                        status=consts.TASK_STATUSES.ready)
            db().add(task)
            db().flush()

            return task

        task = Task(name=consts.TASK_NAMES.node_deletion,
                    cluster=self.cluster)
        db().add(task)
        for node in nodes_to_delete:
            objects.Node.update(node,
                                {'status': consts.NODE_STATUSES.removing})
        db().flush()

        nodes_to_deploy = []
        objects.Cluster.adjust_nodes_lists_on_controller_removing(
            self.cluster, nodes_to_delete, nodes_to_deploy)

        if nodes_to_deploy:
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = task.create_subtask(
                consts.TASK_NAMES.deployment)

            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )
            db().flush()

            # if generating the task message for the orchestrator failed,
            # the task has already been set to error
            if task_deployment.status == consts.TASK_STATUSES.error:
                return task_deployment

            rpc.cast('naily', [deployment_message])

        db().commit()

        self._call_silently(
            task,
            tasks.DeletionTask,
            nodes=tasks.DeletionTask.prepare_nodes_for_task(
                nodes_to_delete, mclient_remove=mclient_remove))

        return task
Example #13
File: node.py Project: tsipa/fuel-web
    def POST(self):
        """:returns: JSONized Node object.
        :http: * 201 (node successfully created)
               * 400 (invalid node data specified)
               * 403 (node has incorrect status)
               * 409 (node with such parameters already exists)
        """
        data = self.checked_data()
        if data.get("status", "") != "discover":
            error = web.forbidden()
            error.data = "Only bootstrap nodes are allowed to be registered."
            msg = u"Node with mac '{0}' was not created, " \
                  u"because request status is '{1}'."\
                .format(data[u'mac'], data.get(u'status'))
            logger.warning(msg)
            raise error
        node = Node(name="Untitled (%s)" % data['mac'][-5:],
                    timestamp=datetime.now())
        if "cluster_id" in data:
            # FIXME(vk): this part is needed only for tests. Normally,
            # nodes are created only by agent and POST requests don't contain
            # cluster_id, but our integration and unit tests widely use it.
            # We need to assign cluster first
            cluster_id = data.pop("cluster_id")
            if cluster_id:
                node.cluster = db.query(Cluster).get(cluster_id)
        for key, value in data.iteritems():
            if key == "id":
                continue
            elif key == "meta":
                node.create_meta(value)
            else:
                setattr(node, key, value)

        db().add(node)
        db().commit()
        node.attributes = NodeAttributes()

        try:
            node.attributes.volumes = node.volume_manager.gen_volumes_info()
            if node.cluster:
                node.cluster.add_pending_changes("disks", node_id=node.id)
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   "info for node '{0}': '{1}'").format(
                       node.name or data.get("mac") or data.get("id"),
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
        db().add(node)
        db().commit()

        network_manager = NetworkManager
        # Add interfaces for node from 'meta'.
        if node.meta and node.meta.get('interfaces'):
            network_manager.update_interfaces_info(node)

        if node.cluster_id:
            network_manager = node.cluster.network_manager
            network_manager.assign_networks_by_default(node)

        try:
            # we use a multiplier of 1024 (1073741824 = 1024 ** 3) because,
            # unlike disk sizes, RAM is not subject to the decimal-vs-binary
            # capacity confusion described below
            ram = str(
                round(float(node.meta['memory']['total']) / 1073741824,
                      1)) + " GB RAM"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"

        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            hd_size = round(
                float(
                    sum([d["size"] for d in node.meta["disks"]]) / 1000000000),
                1)
            # if the total HDD size is > 100 GB, show it in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception as exc:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"

        cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
        notifier.notify("discover",
                        "New node is discovered: %s CPUs / %s / %s " %
                        (cores, ram, hd_size),
                        node_id=node.id)
        raise web.webapi.created(json.dumps(NodeHandler.render(node),
                                            indent=4))