def create_node(self, api=False, exclude=None, expect_http=201, expect_message=None, **kwargs):
    """Create a test node, either through the REST API or directly in the DB.

    :param api: when True, POST the node data to NodeCollectionHandler;
        otherwise build a Node object directly and commit it.
    :param exclude: optional list of keys to drop from the generated
        node data before creation.
    :param expect_http: HTTP status expected from the API path.
    :param expect_message: when set, exact response body expected.
    :param kwargs: overrides for the generated node data.
    :returns: parsed JSON dict (API path), the Node object (direct
        path), or None when a non-2xx status was expected.
    """
    default_metadata = self.default_metadata()
    mac = self._generate_random_mac()
    # Keep the first interface's MAC in sync with the node's MAC.
    default_metadata['interfaces'][0]['mac'] = mac
    node_data = {
        'mac': mac,
        'role': 'controller',
        'status': 'discover',
        'meta': default_metadata
    }
    if kwargs:
        node_data.update(kwargs)
    if exclude and isinstance(exclude, list):
        for ex in exclude:
            try:
                del node_data[ex]
            except KeyError as err:
                logging.warning(err)
    if api:
        resp = self.app.post(
            reverse('NodeCollectionHandler'),
            json.dumps(node_data),
            headers=self.default_headers,
            expect_errors=True
        )
        self.tester.assertEquals(resp.status, expect_http)
        if expect_message:
            self.tester.assertEquals(resp.body, expect_message)
        # A non-2xx response carries no node payload.
        if str(expect_http)[0] != "2":
            return None
        node = json.loads(resp.body)
        node_db = self.db.query(Node).get(node['id'])
        self._set_interfaces_if_not_set_in_meta(
            node_db, kwargs.get('meta', None))
        self.nodes.append(node_db)
    else:
        node = Node()
        node.timestamp = datetime.now()
        if not node_data.get('meta'):
            node_data['meta'] = default_metadata
        else:
            # BUGFIX: merge the caller-supplied metadata OVER the
            # defaults.  The previous code did
            # node_data['meta'].update(default_metadata), which
            # silently clobbered every caller-provided value with the
            # generated defaults.
            custom_meta = node_data['meta']
            node_data['meta'] = default_metadata
            node_data['meta'].update(custom_meta)
        for key, value in node_data.iteritems():
            setattr(node, key, value)
        node.attributes = self.create_attributes()
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        self.db.add(node)
        # Commit so node.id is assigned before interfaces are set up.
        self.db.commit()
        self._set_interfaces_if_not_set_in_meta(node, kwargs.get('meta', None))
        self.nodes.append(node)
    return node
def run(self):
    """Simulate node deletion, then restore the surviving nodes.

    Reports task completion through the receiver method named by
    ``self.respond_to``, then re-creates every node listed under
    ``nodes_to_restore`` in 'discover' state, regenerates its volume
    layout and interface info, and emits a "discover" notification.
    """
    super(FakeDeletionThread, self).run()
    receiver = NailgunReceiver
    kwargs = {
        'task_uuid': self.task_uuid,
        'nodes': self.data['args']['nodes'],
        'status': 'ready'
    }
    nodes_to_restore = self.data['args'].get('nodes_to_restore', [])
    resp_method = getattr(receiver, self.respond_to)
    resp_method(**kwargs)
    for node_data in nodes_to_restore:
        node = Node(**node_data)
        # An offline node was just deleted from the db and cannot be
        # re-created with status 'discover', so skip it.
        if not node.online:
            continue
        node.status = 'discover'
        db().add(node)
        # Commit first so node.id is assigned before NodeAttributes
        # references it below.
        db().commit()
        node.attributes = NodeAttributes(node_id=node.id)
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        # NOTE(review): called on the class and passed the whole node;
        # a sibling variant instantiates NetworkManager() and passes
        # node.id -- confirm which signature is current.
        NetworkManager.update_interfaces_info(node)
        db().commit()
        # 'ram' is presumably already in GB here -- TODO confirm.
        ram = round(node.meta.get('ram') or 0, 1)
        cores = node.meta.get('cores') or 'unknown'
        notifier.notify("discover",
                        "New node with %s CPU core(s) "
                        "and %s GB memory is discovered" %
                        (cores, ram), node_id=node.id)
def POST(self):
    """Register a new node from posted agent data and respond 201.

    Creates the Node row, generates its volume layout (best-effort),
    creates NIC rows from the reported 'meta', assigns networks when
    the node already belongs to a cluster, and emits a "discover"
    notification.  Never returns normally: raises
    ``web.webapi.created`` carrying the JSONized node.
    """
    data = self.validator.validate(web.data())
    node = Node()
    for key, value in data.iteritems():
        setattr(node, key, value)
    # Default display name derives from the MAC's last five characters.
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    self.db.add(node)
    # Commit so node.id exists for the calls below.
    self.db.commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes(
                "disks",
                node_id=node.id
            )
    except Exception as exc:
        # Volume generation is best-effort: log and notify, but keep
        # the node registered.
        msg = (
            u"Failed to generate volumes "
            "info for node '{0}': '{1}'"
        ).format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details"
        )
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    self.db.add(node)
    self.db.commit()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        nics = self.get_nics_from_meta(node)
        map(self.db.add, nics)
        self.db.commit()
    if node.cluster_id:
        self.allow_network_assignment_to_all_interfaces(node)
        self.assign_networks_to_main_interface(node)
        self.db.commit()
    try:
        # 1073741824 = 1024 ** 3; memory total is presumably reported
        # in bytes and shown as GB -- TODO confirm against the agent.
        ram = str(round(float(
            node.meta['memory']['total']) / 1073741824, 1))
    except (KeyError, TypeError, ValueError):
        ram = "unknown"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify("discover",
                    "New node with %s CPU core(s) "
                    "and %s GB memory is discovered" %
                    (cores, ram), node_id=node.id)
    raise web.webapi.created(json.dumps(
        NodeHandler.render(node),
        indent=4
    ))
def test_session_update(self):
    """A change committed through a second session must be visible to
    the object loaded in the first one."""
    original = Node()
    original.mac = u"ASDFGHJKLMNOPR"
    original.timestamp = datetime.now()
    self.db.add(original)
    self.db.commit()

    # Load the same row through an independent session and change it.
    duplicate = self.db2.query(Node).filter(Node.id == original.id).first()
    duplicate.mac = u"12345678"
    self.db2.add(duplicate)
    self.db2.commit()

    # Re-query through the first session to refresh its view.
    self.db.query(Node).filter(Node.id == original.id).first()
    self.assertEquals(original.mac, u"12345678")
def test_session_update(self):
    """Verify cross-session visibility of a committed attribute change."""
    node = Node()
    node.mac = u"ASDFGHJKLMNOPR"
    node.timestamp = datetime.now()
    self.db.add(node)
    self.db.commit()

    # Fetch the same row via the second session and mutate it there.
    same_row = self.db2.query(Node).filter_by(id=node.id).first()
    same_row.mac = u"12345678"
    self.db2.add(same_row)
    self.db2.commit()

    # Touch the row again through the first session, then check that
    # the update propagated to the originally loaded object.
    self.db.query(Node).filter_by(id=node.id).first()
    self.assertEquals(node.mac, u"12345678")
def run(self):
    """Simulate node deletion, then restore the surviving nodes.

    Reports task completion through the receiver method named by
    ``self.respond_to``, then re-creates every node listed under
    ``nodes_to_restore`` in 'discover' state, regenerates its volume
    layout and interface info, and emits a "discover" notification.
    """
    super(FakeDeletionThread, self).run()
    receiver = NailgunReceiver
    kwargs = {
        'task_uuid': self.task_uuid,
        'nodes': self.data['args']['nodes'],
        'status': 'ready'
    }
    nodes_to_restore = self.data['args'].get('nodes_to_restore', [])
    # NOTE: removed the dead local
    # `tick_interval = int(settings.FAKE_TASKS_TICK_INTERVAL) or 3`
    # -- it was computed but never used.
    resp_method = getattr(receiver, self.respond_to)
    resp_method(**kwargs)
    # Loop-invariant: create the manager once instead of per node.
    network_manager = NetworkManager()
    for node_data in nodes_to_restore:
        node = Node(**node_data)
        # An offline node was just deleted from the db and cannot be
        # re-created with status 'discover', so skip it.
        if not node.online:
            continue
        node.status = 'discover'
        db().add(node)
        # Commit first so node.id is assigned before NodeAttributes
        # references it below.
        db().commit()
        node.attributes = NodeAttributes(node_id=node.id)
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        network_manager.update_interfaces_info(node.id)
        db().commit()
        # 'ram' is presumably already in GB here -- TODO confirm.
        ram = round(node.meta.get('ram') or 0, 1)
        cores = node.meta.get('cores') or 'unknown'
        notifier.notify("discover",
                        "New node with %s CPU core(s) "
                        "and %s GB memory is discovered" %
                        (cores, ram), node_id=node.id)
def create_node(
        self, api=False,
        exclude=None, expect_http=201,
        expect_message=None,
        **kwargs):
    """Create a test node, either through the REST API or directly in the DB.

    :param api: when True, POST the node data to NodeCollectionHandler;
        otherwise build a Node object directly and commit it.
    :param exclude: optional list of keys to drop from the generated
        node data before creation.
    :param expect_http: HTTP status expected from the API path.
    :param expect_message: when set, exact response body expected.
    :param kwargs: overrides for the generated node data; 'meta' is
        merged over the default metadata rather than replacing it.
    :returns: parsed JSON dict (API path), the Node object (direct
        path), or None when a non-2xx status was expected.
    """
    metadata = kwargs.get('meta')
    default_metadata = self.default_metadata()
    if metadata:
        # Caller-supplied meta overrides the defaults key by key.
        default_metadata.update(metadata)
    mac = self._generate_random_mac()
    if default_metadata['interfaces']:
        # Keep the first interface's MAC consistent with the node MAC
        # (or the caller-supplied one).
        default_metadata['interfaces'][0]['mac'] = kwargs.get('mac', mac)
    node_data = {
        'mac': mac,
        'roles': ['controller'],
        'status': 'discover',
        'meta': default_metadata
    }
    if kwargs:
        # Temporarily pop 'meta' so node_data.update() does not replace
        # the merged metadata built above; restore it afterwards so the
        # kwargs.get('meta') lookups below still see it.
        meta = kwargs.pop('meta', None)
        node_data.update(kwargs)
        if meta:
            kwargs['meta'] = meta
    if exclude and isinstance(exclude, list):
        for ex in exclude:
            try:
                del node_data[ex]
            except KeyError as err:
                logging.warning(err)
    if api:
        resp = self.app.post(
            reverse('NodeCollectionHandler'),
            json.dumps(node_data),
            headers=self.default_headers,
            expect_errors=True
        )
        self.tester.assertEquals(resp.status, expect_http)
        if expect_message:
            self.tester.assertEquals(resp.body, expect_message)
        # A non-2xx response carries no node payload.
        if str(expect_http)[0] != "2":
            return None
        # NOTE(review): this repeats the status assertion made above.
        self.tester.assertEquals(resp.status, expect_http)
        node = json.loads(resp.body)
        node_db = self.db.query(Node).get(node['id'])
        self._set_interfaces_if_not_set_in_meta(
            node_db.id,
            kwargs.get('meta', None))
        self.nodes.append(node_db)
    else:
        node = Node()
        node.timestamp = datetime.now()
        if 'cluster_id' in node_data:
            # Prefer attaching an already-loaded cluster object from
            # self.clusters; the for/else falls back to setting the raw
            # id when no loaded cluster matches.
            cluster_id = node_data.pop('cluster_id')
            for cluster in self.clusters:
                if cluster.id == cluster_id:
                    node.cluster = cluster
                    break
            else:
                node.cluster_id = cluster_id
        for key, value in node_data.iteritems():
            setattr(node, key, value)
        node.attributes = self.create_attributes()
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        self.db.add(node)
        # Commit so node.id is assigned before interfaces are created.
        self.db.commit()
        if node.meta and node.meta.get('interfaces'):
            self._create_interfaces_from_meta(node)
        self.nodes.append(node)
    return node
def POST(self):
    """:returns: JSONized Node object.
    :http: * 201 (cluster successfully created)
           * 400 (invalid node data specified)
           * 403 (node has incorrect status)
           * 409 (node with such parameters already exists)
    """
    data = self.checked_data()
    # Only nodes still in bootstrap ('discover') may self-register.
    if data.get("status", "") != "discover":
        error = web.forbidden()
        error.data = "Only bootstrap nodes are allowed to be registered."
        msg = u"Node with mac '{0}' was not created, " \
              u"because request status is '{1}'."\
              .format(data[u'mac'], data[u'status'])
        logger.warning(msg)
        raise error
    node = Node()
    if "cluster_id" in data:
        # FIXME(vk): this part is needed only for tests. Normally,
        # nodes are created only by agent and POST requests don't contain
        # cluster_id, but our integration and unit tests widely use it.
        # We need to assign cluster first
        cluster_id = data.pop("cluster_id")
        if cluster_id:
            # NOTE(review): `db.query(...)` here but `db().add(...)`
            # below -- both work with a scoped_session, but confirm the
            # intended call form.
            node.cluster = db.query(Cluster).get(cluster_id)
    for key, value in data.iteritems():
        if key == "id":
            # Never trust a client-supplied primary key.
            continue
        elif key == "meta":
            node.create_meta(value)
        else:
            setattr(node, key, value)
    # Default display name derives from the MAC's last five characters.
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    db().add(node)
    # Commit so node.id exists for the calls below.
    db().commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes(
                "disks",
                node_id=node.id
            )
    except Exception as exc:
        # Volume generation is best-effort: log and notify, but keep
        # the node registered.
        msg = (
            u"Failed to generate volumes "
            "info for node '{0}': '{1}'"
        ).format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details"
        )
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    db().add(node)
    db().commit()
    network_manager = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        network_manager.update_interfaces_info(node.id)
    if node.cluster_id:
        # NOTE(review): sibling variants pass node.id here, not the
        # node object -- confirm the expected argument type.
        network_manager.allow_network_assignment_to_all_interfaces(node)
        network_manager.assign_networks_to_main_interface(node)
    try:
        # we use multiplier of 1024 because there are no problems here
        # with unfair size calculation
        ram = str(round(float(
            node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        ram = "unknown RAM"
    try:
        # we use multiplier of 1000 because disk vendors specify HDD size
        # in terms of decimal capacity. Sources:
        # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
        # http://physics.nist.gov/cuu/Units/binary.html
        hd_size = round(float(
            sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
        # if HDD > 100 GB we show it's size in TB
        if hd_size > 100:
            hd_size = str(hd_size / 1000) + " TB HDD"
        else:
            hd_size = str(hd_size) + " GB HDD"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        hd_size = "unknown HDD"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify(
        "discover",
        "New node is discovered: %s CPUs / %s / %s " %
        (cores, ram, hd_size),
        node_id=node.id
    )
    raise web.webapi.created(json.dumps(
        NodeHandler.render(node),
        indent=4
    ))
def POST(self):
    """Register a new node from posted data and respond 201.

    Creates the Node row, generates its volume layout (best-effort),
    updates interface info from the reported 'meta', assigns networks
    when the node already belongs to a cluster, and emits a "discover"
    notification.  Never returns normally: raises
    ``web.webapi.created`` carrying the JSONized node.
    """
    data = self.checked_data()
    node = Node()
    for key, value in data.iteritems():
        if key == "id":
            # Never trust a client-supplied primary key.
            continue
        elif key == "meta":
            node.create_meta(value)
        else:
            setattr(node, key, value)
    # Default display name derives from the MAC's last five characters.
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    db().add(node)
    # Commit so node.id exists for the calls below.
    db().commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes(
                "disks",
                node_id=node.id
            )
    except Exception as exc:
        # Volume generation is best-effort: log and notify, but keep
        # the node registered.
        msg = (
            u"Failed to generate volumes "
            "info for node '{0}': '{1}'"
        ).format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details"
        )
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    db().add(node)
    db().commit()
    network_manager = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        network_manager.update_interfaces_info(node.id)
    if node.cluster_id:
        network_manager.allow_network_assignment_to_all_interfaces(node.id)
        network_manager.assign_networks_to_main_interface(node.id)
    try:
        # we use multiplier of 1024 because there are no problems here
        # with unfair size calculation
        ram = str(round(float(
            node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        ram = "unknown RAM"
    try:
        # we use multiplier of 1000 because disk vendors specify HDD size
        # in terms of decimal capacity. Sources:
        # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
        # http://physics.nist.gov/cuu/Units/binary.html
        hd_size = round(float(
            sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
        # if HDD > 100 GB we show it's size in TB
        if hd_size > 100:
            hd_size = str(hd_size / 1000) + " TB HDD"
        else:
            hd_size = str(hd_size) + " GB HDD"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        hd_size = "unknown HDD"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify(
        "discover",
        "New node is discovered: %s CPUs / %s / %s " %
        (cores, ram, hd_size),
        node_id=node.id
    )
    raise web.webapi.created(json.dumps(
        NodeHandler.render(node),
        indent=4
    ))
def POST(self):
    """:returns: JSONized Node object.
    :http: * 201 (cluster successfully created)
           * 400 (invalid node data specified)
           * 403 (node has incorrect status)
           * 409 (node with such parameters already exists)
    """
    data = self.checked_data()
    # Only nodes still in bootstrap ('discover') may self-register.
    if data.get("status", "") != "discover":
        error = web.forbidden()
        error.data = "Only bootstrap nodes are allowed to be registered."
        msg = u"Node with mac '{0}' was not created, " \
              u"because request status is '{1}'."\
              .format(data[u'mac'], data[u'status'])
        logger.warning(msg)
        raise error
    node = Node()
    if "cluster_id" in data:
        # FIXME(vk): this part is needed only for tests. Normally,
        # nodes are created only by agent and POST requests don't contain
        # cluster_id, but our integration and unit tests widely use it.
        # We need to assign cluster first
        cluster_id = data.pop("cluster_id")
        if cluster_id:
            # NOTE(review): `db.query(...)` here but `db().add(...)`
            # below -- both work with a scoped_session, but confirm the
            # intended call form.
            node.cluster = db.query(Cluster).get(cluster_id)
    for key, value in data.iteritems():
        if key == "id":
            # Never trust a client-supplied primary key.
            continue
        elif key == "meta":
            node.create_meta(value)
        else:
            setattr(node, key, value)
    # Default display name derives from the MAC's last five characters.
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    db().add(node)
    # Commit so node.id exists for the calls below.
    db().commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes("disks", node_id=node.id)
    except Exception as exc:
        # Volume generation is best-effort: log and notify, but keep
        # the node registered.
        msg = (u"Failed to generate volumes "
               "info for node '{0}': '{1}'").format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details")
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    db().add(node)
    db().commit()
    network_manager = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        network_manager.update_interfaces_info(node.id)
    if node.cluster_id:
        network_manager.allow_network_assignment_to_all_interfaces(node.id)
        network_manager.assign_networks_to_main_interface(node.id)
    try:
        # we use multiplier of 1024 because there are no problems here
        # with unfair size calculation
        ram = str(
            round(float(node.meta['memory']['total']) / 1073741824,
                  1)) + " GB RAM"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        ram = "unknown RAM"
    try:
        # we use multiplier of 1000 because disk vendors specify HDD size
        # in terms of decimal capacity. Sources:
        # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
        # http://physics.nist.gov/cuu/Units/binary.html
        hd_size = round(
            float(
                sum([d["size"] for d in node.meta["disks"]]) / 1000000000),
            1)
        # if HDD > 100 GB we show it's size in TB
        if hd_size > 100:
            hd_size = str(hd_size / 1000) + " TB HDD"
        else:
            hd_size = str(hd_size) + " GB HDD"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        hd_size = "unknown HDD"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify("discover",
                    "New node is discovered: %s CPUs / %s / %s " %
                    (cores, ram, hd_size),
                    node_id=node.id)
    raise web.webapi.created(json.dumps(NodeHandler.render(node), indent=4))
def execute(self, task, respond_to='remove_nodes_resp'):
    """Kick off deletion of a cluster's pending-deletion nodes.

    Builds the list of nodes to delete, removes offline nodes directly
    from the db, cleans puppet certs for real (non-fake) deletions, and
    finally casts a 'remove_nodes' RPC message to naily.

    :param task: the deletion Task; its cluster's nodes are examined.
    :param respond_to: receiver method name for the RPC response.
    """
    logger.debug("DeletionTask.execute(task=%s)" % task.uuid)
    task_uuid = task.uuid
    logger.debug("Nodes deletion task is running")
    nodes_to_delete = []
    nodes_to_restore = []
    USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
    # no need to call naily if there are no nodes in cluster
    if respond_to == 'remove_cluster_resp' and \
            not list(task.cluster.nodes):
        rcvr = rpc.receiver.NailgunReceiver()
        rcvr.initialize()
        rcvr.remove_cluster_resp(
            task_uuid=task_uuid,
            status='ready',
            progress=100
        )
        return
    for node in task.cluster.nodes:
        if node.pending_deletion:
            nodes_to_delete.append({
                'id': node.id,
                'uid': node.id,
                'role': node.role
            })
            if USE_FAKE:
                # only fake tasks
                # Snapshot the node so the fake deletion thread can
                # re-create it afterwards.  Columns listed in
                # keep_attrs are deliberately NOT copied, so the
                # restored node gets fresh values for them.
                new_node = Node()
                keep_attrs = (
                    'id',
                    'cluster_id',
                    'role',
                    'pending_deletion',
                    'pending_addition'
                )
                for prop in object_mapper(new_node).iterate_properties:
                    if isinstance(
                        prop, ColumnProperty
                    ) and prop.key not in keep_attrs:
                        setattr(
                            new_node,
                            prop.key,
                            getattr(node, prop.key)
                        )
                nodes_to_restore.append(new_node)
                # /only fake tasks
    # Deletion offline nodes from db
    if nodes_to_delete:
        # Iterate over a copy because the loop mutates nodes_to_delete.
        for node in list(nodes_to_delete):
            node_db = orm().query(Node).get(node['id'])
            if not node_db.online:
                slave_name = TaskHelper.make_slave_name(
                    node['id'], node['role']
                )
                logger.info(
                    "Node %s is offline, removing node from db" %
                    slave_name)
                orm().delete(node_db)
                orm().commit()
                nodes_to_delete.remove(node)
    # only real tasks
    engine_nodes = []
    if not USE_FAKE:
        if nodes_to_delete:
            logger.debug("There are nodes to delete")
            for node in nodes_to_delete:
                slave_name = TaskHelper.make_slave_name(
                    node['id'], node['role']
                )
                engine_nodes.append(slave_name)
                try:
                    logger.info("Deleting old certs from puppet..")
                    node_db = orm().query(Node).get(node['id'])
                    if node_db and node_db.fqdn:
                        node_hostname = node_db.fqdn
                    else:
                        node_hostname = '.'.join([
                            slave_name, settings.DNS_DOMAIN])
                    cmd = "puppet cert clean {0}".format(node_hostname)
                    proc = subprocess.Popen(
                        shlex.split(cmd),
                        shell=False,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE
                    )
                    p_stdout, p_stderr = proc.communicate()
                    logger.info(
                        "'{0}' executed, STDOUT: '{1}',"
                        " STDERR: '{2}'".format(
                            cmd,
                            p_stdout,
                            p_stderr
                        )
                    )
                except OSError:
                    logger.warning(
                        "'{0}' returned non-zero exit code".format(
                            cmd
                        )
                    )
                except Exception as e:
                    # NOTE(review): e.message is deprecated in Python 2
                    # and gone in Python 3 -- consider str(e).
                    logger.warning("Exception occurred while trying to \
remove the system from Cobbler: '{0}'".format(
                        e.message))
    # /only real tasks
    msg_delete = {
        'method': 'remove_nodes',
        'respond_to': respond_to,
        'args': {
            'task_uuid': task.uuid,
            'nodes': nodes_to_delete,
            'engine': {
                'url': settings.COBBLER_URL,
                'username': settings.COBBLER_USER,
                'password': settings.COBBLER_PASSWORD,
            },
            'engine_nodes': engine_nodes
        }
    }
    # only fake tasks
    if USE_FAKE and nodes_to_restore:
        msg_delete['args']['nodes_to_restore'] = nodes_to_restore
    # /only fake tasks
    logger.debug("Calling rpc remove_nodes method")
    rpc.cast('naily', msg_delete)
def POST(self):
    """Register a new node from posted data and respond 201.

    Creates the Node row, generates its volume layout (best-effort),
    updates interface info from the reported 'meta', assigns networks
    when the node already belongs to a cluster, and emits a "discover"
    notification.  Never returns normally: raises
    ``web.webapi.created`` carrying the JSONized node.
    """
    data = self.checked_data()
    node = Node()
    for key, value in data.iteritems():
        if key == "id":
            # Never trust a client-supplied primary key.
            continue
        elif key == "meta":
            node.create_meta(value)
        else:
            setattr(node, key, value)
    # Default display name derives from the MAC's last five characters.
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    db().add(node)
    # Commit so node.id exists for the calls below.
    db().commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes("disks", node_id=node.id)
    except Exception as exc:
        # Volume generation is best-effort: log and notify, but keep
        # the node registered.
        msg = (u"Failed to generate volumes "
               "info for node '{0}': '{1}'").format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details")
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    db().add(node)
    db().commit()
    network_manager = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        network_manager.update_interfaces_info(node.id)
    if node.cluster_id:
        network_manager.allow_network_assignment_to_all_interfaces(node.id)
        network_manager.assign_networks_to_main_interface(node.id)
    try:
        # we use multiplier of 1024 because there are no problems here
        # with unfair size calculation
        ram = str(
            round(float(node.meta['memory']['total']) / 1073741824,
                  1)) + " GB RAM"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        ram = "unknown RAM"
    try:
        # we use multiplier of 1000 because disk vendors specify HDD size
        # in terms of decimal capacity. Sources:
        # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
        # http://physics.nist.gov/cuu/Units/binary.html
        hd_size = round(
            float(
                sum([d["size"] for d in node.meta["disks"]]) / 1000000000),
            1)
        # if HDD > 100 GB we show it's size in TB
        if hd_size > 100:
            hd_size = str(hd_size / 1000) + " TB HDD"
        else:
            hd_size = str(hd_size) + " GB HDD"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        hd_size = "unknown HDD"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify("discover",
                    "New node is discovered: %s CPUs / %s / %s " %
                    (cores, ram, hd_size),
                    node_id=node.id)
    raise web.webapi.created(json.dumps(NodeHandler.render(node), indent=4))
def POST(self):
    """Register a new node from posted data and respond 201.

    Creates the Node row, generates its volume layout (best-effort),
    updates interface info from the reported 'meta', assigns networks
    when the node already belongs to a cluster, and emits a "discover"
    notification.  Never returns normally: raises
    ``web.webapi.created`` carrying the JSONized node.
    """
    data = self.checked_data()
    node = Node()
    for key, value in data.iteritems():
        if key == "id":
            # Never trust a client-supplied primary key.
            continue
        elif key == "meta":
            node.create_meta(value)
        else:
            setattr(node, key, value)
    # Default display name derives from the MAC's last five characters.
    node.name = "Untitled (%s)" % data['mac'][-5:]
    node.timestamp = datetime.now()
    db().add(node)
    # Commit so node.id exists for the calls below.
    db().commit()
    node.attributes = NodeAttributes()
    try:
        node.attributes.volumes = node.volume_manager.gen_volumes_info()
        if node.cluster:
            node.cluster.add_pending_changes(
                "disks",
                node_id=node.id
            )
    except Exception as exc:
        # Volume generation is best-effort: log and notify, but keep
        # the node registered.
        msg = (
            u"Failed to generate volumes "
            "info for node '{0}': '{1}'"
        ).format(
            node.name or data.get("mac") or data.get("id"),
            str(exc) or "see logs for details"
        )
        logger.warning(traceback.format_exc())
        notifier.notify("error", msg, node_id=node.id)
    db().add(node)
    db().commit()
    network_manager = NetworkManager()
    # Add interfaces for node from 'meta'.
    if node.meta and node.meta.get('interfaces'):
        network_manager.update_interfaces_info(node.id)
    if node.cluster_id:
        network_manager.allow_network_assignment_to_all_interfaces(node.id)
        network_manager.assign_networks_to_main_interface(node.id)
    try:
        # 1073741824 = 1024 ** 3; total is presumably reported in
        # bytes and shown as GB -- TODO confirm against the agent.
        ram = str(round(float(
            node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        ram = "unknown RAM"
    try:
        # Divide by 10 ** 9: disk vendors specify HDD sizes in decimal
        # (SI) capacity, not binary.
        hd_size = round(float(
            sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
        # if HDD > 100 GB we show it's size in TB
        if hd_size > 100:
            hd_size = str(hd_size / 1000) + " TB HDD"
        else:
            hd_size = str(hd_size) + " GB HDD"
    except Exception as exc:
        logger.warning(traceback.format_exc())
        hd_size = "unknown HDD"
    cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
    notifier.notify(
        "discover",
        "New node is discovered: %s CPUs / %s / %s " %
        (cores, ram, hd_size),
        node_id=node.id
    )
    raise web.webapi.created(json.dumps(
        NodeHandler.render(node),
        indent=4
    ))