def _success_action(cls, task, status, progress):
    """Finalize a successful deployment task.

    Builds a user-facing "deployment is done" message (including the
    Horizon dashboard URL when it can be determined), sends it through
    the notifier and records the final task status.
    """
    network_manager = NetworkManager()
    # check if all nodes are ready
    if any(map(lambda n: n.status == 'error', task.cluster.nodes)):
        # At least one node failed: delegate to the error path instead.
        cls._error_action(task, 'error', 100)
        return
    if task.cluster.mode in ('singlenode', 'multinode'):
        # determining horizon url - it's an IP
        # of a first cluster controller
        controller = db().query(Node).filter_by(
            cluster_id=task.cluster_id).filter(
                Node.role_list.any(name='controller')).first()
        if controller:
            logger.debug(
                u"Controller is found, node_id=%s, "
                "getting it's IP addresses", controller.id)
            # Py2 filter() returns a list of the controller's networks
            # named 'public' that carry an 'ip' key.
            public_net = filter(
                lambda n: n['name'] == 'public' and 'ip' in n,
                network_manager.get_node_networks(controller.id))
            if public_net:
                # 'ip' is CIDR-formatted ("a.b.c.d/nn"); keep the address.
                horizon_ip = public_net[0]['ip'].split('/')[0]
                message = (
                    u"Deployment of environment '{0}' is done. "
                    "Access the OpenStack dashboard (Horizon) at "
                    "http://{1}/ or via internal network at http://{2}/"
                ).format(task.cluster.name, horizon_ip, controller.ip)
            else:
                message = (
                    u"Deployment of environment '{0}' is done").format(
                        task.cluster.name)
                logger.warning(u"Public ip for controller node "
                               "not found in '{0}'".format(
                                   task.cluster.name))
        else:
            message = (u"Deployment of environment"
                       " '{0}' is done").format(task.cluster.name)
            logger.warning(u"Controller node not found in '{0}'".format(
                task.cluster.name))
    elif task.cluster.is_ha_mode:
        # determining horizon url in HA mode - it's vip
        # from a public network saved in task cache
        try:
            netmanager = NetworkManager()
            message = (
                u"Deployment of environment '{0}' is done. "
                "Access the OpenStack dashboard (Horizon) at {1}").format(
                    task.cluster.name,
                    netmanager.get_horizon_url(task.cluster.id))
        except Exception as exc:
            # Best-effort: fall back to a message without the URL.
            logger.error(": ".join([str(exc), traceback.format_exc()]))
            message = (u"Deployment of environment"
                       " '{0}' is done").format(task.cluster.name)
            logger.warning(u"Cannot find virtual IP for '{0}'".format(
                task.cluster.name))
    notifier.notify("done", message, task.cluster_id)
    TaskHelper.update_task_status(task.uuid, status, progress, message)
def update(cls, cluster, network_configuration):
    """Apply a network-configuration dict to *cluster*.

    Recognized keys: 'net_manager', 'dns_nameservers' and 'networks'
    (a list of network-group dicts keyed by DB id).  The admin network
    group is skipped — it is managed by Nailgun itself.
    """
    from nailgun.network.manager import NetworkManager
    network_manager = NetworkManager()
    if 'net_manager' in network_configuration:
        setattr(cluster, 'net_manager',
                network_configuration['net_manager'])
    if 'dns_nameservers' in network_configuration:
        setattr(cluster, 'dns_nameservers',
                network_configuration['dns_nameservers']['nameservers'])
    if 'networks' in network_configuration:
        for ng in network_configuration['networks']:
            # Never touch the admin network group via this API.
            if ng['id'] == network_manager.get_admin_network_group_id():
                continue
            ng_db = db().query(NetworkGroup).get(ng['id'])
            for key, value in ng.iteritems():
                if key == "ip_ranges":
                    cls._set_ip_ranges(ng['id'], value)
                else:
                    # A CIDR change regenerates IP ranges, except for
                    # 'public'/'floating' whose ranges are user-managed.
                    if key == 'cidr' and \
                            not ng['name'] in ('public', 'floating'):
                        network_manager.update_ranges_from_cidr(
                            ng_db, value)
                    setattr(ng_db, key, value)
            network_manager.create_networks(ng_db)
            ng_db.cluster.add_pending_changes('networks')
def POST(self): """:returns: JSONized Cluster object. :http: * 201 (cluster successfully created) * 400 (invalid cluster data specified) * 409 (cluster with such parameters already exists) """ # It's used for cluster creating only. data = self.checked_data() cluster = Cluster() cluster.release = db().query(Release).get(data["release"]) # TODO(NAME): use fields for field in ('name', 'mode', 'net_manager'): if data.get(field): setattr(cluster, field, data.get(field)) db().add(cluster) db().commit() attributes = Attributes( editable=cluster.release.attributes_metadata.get("editable"), generated=cluster.release.attributes_metadata.get("generated"), cluster=cluster ) attributes.generate_fields() netmanager = NetworkManager() try: netmanager.create_network_groups(cluster.id) cluster.add_pending_changes("attributes") cluster.add_pending_changes("networks") if 'nodes' in data and data['nodes']: nodes = db().query(Node).filter( Node.id.in_(data['nodes']) ).all() map(cluster.nodes.append, nodes) db().commit() for node in nodes: netmanager.allow_network_assignment_to_all_interfaces( node.id ) netmanager.assign_networks_to_main_interface(node.id) raise web.webapi.created(json.dumps( ClusterHandler.render(cluster), indent=4 )) except ( errors.OutOfVLANs, errors.OutOfIPs, errors.NoSuitableCIDR, errors.InvalidNetworkPool ) as e: # Cluster was created in this request, # so we no need to use ClusterDeletionManager. # All relations wiil be cascade deleted automaticly. # TODO(NAME): investigate transactions db().delete(cluster) raise web.badrequest(e.message)
def PUT(self, cluster_id):
    """Update cluster attributes.

    The special "nodes" key replaces the cluster's node set: removed
    nodes lose their network assignments, added nodes (which must be
    online) get default network assignments.

    :returns: JSONized Cluster object.
    """
    cluster = self.get_object_or_404(Cluster, cluster_id)
    data = self.checked_data()
    network_manager = NetworkManager()
    for key, value in data.iteritems():
        if key == "nodes":
            # TODO: separate nodes for deletion and addition by set().
            # Materialize the query once: iterating a bare Query in the
            # membership tests below would re-execute the SELECT for
            # every single `in` check.
            new_nodes = db().query(Node).filter(
                Node.id.in_(value)).all()
            nodes_to_remove = [n for n in cluster.nodes
                               if n not in new_nodes]
            nodes_to_add = [n for n in new_nodes
                            if n not in cluster.nodes]
            for node in nodes_to_add:
                if not node.online:
                    raise web.badrequest(
                        "Can not add offline node to cluster")
            map(cluster.nodes.remove, nodes_to_remove)
            map(cluster.nodes.append, nodes_to_add)
            for node in nodes_to_remove:
                network_manager.clear_assigned_networks(node.id)
                network_manager.clear_all_allowed_networks(node.id)
            for node in nodes_to_add:
                network_manager.allow_network_assignment_to_all_interfaces(
                    node.id)
                network_manager.assign_networks_to_main_interface(node.id)
        else:
            setattr(cluster, key, value)
    db().commit()
    return self.render(cluster)
def PUT(self, node_id):
    """:returns: JSONized Node object.
    :http: * 200 (OK)
           * 400 (invalid node data specified)
           * 404 (node not found in db)
    """
    node = self.get_object_or_404(Node, node_id)
    if not node.attributes:
        node.attributes = NodeAttributes(node_id=node.id)
    data = self.checked_data(self.validator.validate_update)
    network_manager = NetworkManager()
    old_cluster_id = node.cluster_id
    # Explicitly emptying pending_roles discards the node's pending
    # changes in its cluster.
    if data.get("pending_roles") == [] and node.cluster:
        node.cluster.clear_pending_changes(node_id=node.id)
    if "cluster_id" in data:
        if data["cluster_id"] is None and node.cluster:
            node.cluster.clear_pending_changes(node_id=node.id)
            node.roles = node.pending_roles = []
        node.cluster_id = data["cluster_id"]
        if node.cluster_id != old_cluster_id:
            if old_cluster_id:
                # NOTE(review): these calls mix `node` and `node.id`
                # arguments while sibling handlers pass `node.id` to
                # both — confirm against NetworkManager's signatures.
                network_manager.clear_assigned_networks(node)
                network_manager.clear_all_allowed_networks(node.id)
            if node.cluster_id:
                network_manager.assign_networks_by_default(node)
                network_manager.allow_network_assignment_to_all_interfaces(
                    node)
    # Volumes must be regenerated when the (pending) role set or the
    # cluster membership changes.
    regenerate_volumes = any((
        'roles' in data and set(data['roles']) != set(node.roles),
        'pending_roles' in data and
        set(data['pending_roles']) != set(node.pending_roles),
        node.cluster_id != old_cluster_id
    ))
    for key, value in data.iteritems():
        # we don't allow to update id explicitly
        # and updated cluster_id before all other fields
        if key in ("id", "cluster_id"):
            continue
        setattr(node, key, value)
    # Idiomatic `not in` (was `not node.status in ...`); never touch
    # volumes while the node is being provisioned or deployed.
    if node.status not in ('provisioning', 'deploying') \
            and regenerate_volumes:
        try:
            node.attributes.volumes = \
                node.volume_manager.gen_volumes_info()
        except Exception as exc:
            msg = (u"Failed to generate volumes "
                   "info for node '{0}': '{1}'").format(
                       node.name or data.get("mac") or data.get("id"),
                       str(exc) or "see logs for details")
            logger.warning(traceback.format_exc())
            notifier.notify("error", msg, node_id=node.id)
    db().commit()
    return self.render(node)
def get_default(self, node):
    """Return the default network assignment for *node*.

    Uses NeutronManager for nodes in Neutron-backed clusters and the
    plain NetworkManager otherwise.
    """
    is_neutron = bool(
        node.cluster and node.cluster.net_provider == 'neutron')
    manager = NeutronManager() if is_neutron else NetworkManager()
    return manager.get_default_networks_assignment(node)
def render(cls, instance, fields=None):
    """Serialize *instance* and attach its per-network data under the
    'network_data' key.
    """
    data = JSONHandler.render(instance, fields=cls.fields)
    data['network_data'] = NetworkManager().get_node_networks(
        instance.id)
    return data
def update(cls, cluster, network_configuration):
    """Apply a network-configuration dict to *cluster* (legacy orm()
    variant): updates 'net_manager' and each network group, replacing
    IP ranges wholesale when 'ip_ranges' is present.
    """
    from nailgun.network.manager import NetworkManager
    network_manager = NetworkManager()
    if 'net_manager' in network_configuration:
        setattr(cluster, 'net_manager',
                network_configuration['net_manager'])
    if 'networks' in network_configuration:
        for ng in network_configuration['networks']:
            ng_db = orm().query(NetworkGroup).get(ng['id'])
            for key, value in ng.iteritems():
                if key == "ip_ranges":
                    # deleting old ip ranges
                    map(
                        orm().delete,
                        orm().query(IPAddrRange).filter_by(
                            network_group_id=ng['id']))
                    # value is a list of [first, last] pairs.
                    for r in value:
                        new_ip_range = IPAddrRange(
                            first=r[0],
                            last=r[1],
                            network_group_id=ng['id'])
                        orm().add(new_ip_range)
                        orm().commit()
                else:
                    setattr(ng_db, key, value)
            network_manager.create_networks(ng_db)
            ng_db.cluster.add_pending_changes('networks')
def get_common_attrs(cls, cluster):
    """Common attributes for all facts
    """
    attrs = super(OrchestratorHASerializer, cls).get_common_attrs(
        cluster)
    net_manager = NetworkManager()
    # HA clusters get a virtual IP on the management and public nets.
    for net_name in ('management', 'public'):
        attrs['%s_vip' % net_name] = net_manager.assign_vip(
            cluster.id, net_name)
    ordered_nodes = sorted(attrs['nodes'], key=lambda n: n['uid'])
    controllers = cls.filter_by_roles(
        ordered_nodes, ['controller', 'primary-controller'])
    attrs['last_controller'] = controllers[-1]['name']
    # Assign primary controller in nodes list
    cls.set_primary_controller(attrs['nodes'])
    attrs['mp'] = [
        {'point': '1', 'weight': '1'},
        {'point': '2', 'weight': '2'}]
    return attrs
def __format_node_for_naily(cls, n):
    """Build the per-node dict consumed by naily/astute."""
    netmanager = NetworkManager()
    plain_attrs = ('id', 'status', 'error_type', 'ip', 'mac', 'role',
                   'fqdn', 'progress', 'meta', 'online')
    serialized = dict((attr, getattr(n, attr)) for attr in plain_attrs)
    # 'uid' mirrors the DB id; network data comes from the manager.
    serialized['uid'] = n.id
    serialized['network_data'] = netmanager.get_node_networks(n.id)
    return serialized
def get_admin_ips(cls, node):
    """Collect the set of admin-network IP addresses assigned to
    *node* (an id, judging by the filter_by usage).
    """
    admin_net_id = NetworkManager().get_admin_network_id()
    ip_rows = db().query(IPAddr).filter_by(
        node=node.id).filter_by(network=admin_net_id)
    return set(row.ip_addr for row in ip_rows)
def prepare_for_provisioning(self):
    """Assign admin-network IPs (one per interface) to every node
    scheduled for provisioning, after refreshing their FQDNs.
    """
    from nailgun.network.manager import NetworkManager
    from nailgun.task.helpers import TaskHelper
    manager = NetworkManager()
    pending = TaskHelper.nodes_to_provision(self)
    TaskHelper.update_slave_nodes_fqdn(pending)
    for node in pending:
        nics_count = len(node.meta.get('interfaces', []))
        manager.assign_admin_ips(node.id, nics_count)
def render(cls, instance, fields=None):
    """Serialize *instance* with attached network data.

    Any failure is logged and whatever was built so far is returned:
    None if rendering itself failed, or the rendered dict without
    'network_data' if only the network lookup failed.
    """
    json_data = None
    try:
        json_data = JSONHandler.render(instance, fields=cls.fields)
        json_data['network_data'] = NetworkManager().get_node_networks(
            instance.id)
    except Exception:
        logger.error(traceback.format_exc())
    return json_data
def __check_network(cls, task):
    """Verify the cluster's public network is large enough for every
    node.

    :raises: errors.NetworkCheckError when the public network has
        fewer addresses than the cluster has nodes.
    """
    # Removed an unused `netmanager = NetworkManager()` local.
    nodes_count = len(task.cluster.nodes)
    public_network = filter(lambda ng: ng.name == 'public',
                            task.cluster.network_groups)[0]
    public_network_size = cls.__network_size(public_network)
    if public_network_size < nodes_count:
        error_message = cls.__format_network_error(nodes_count)
        raise errors.NetworkCheckError(error_message)
def PUT(self):
    """Bulk-update interface attributes for several nodes and return
    the re-rendered nodes.
    """
    payload = self.validator.validate_collection_structure(web.data())
    manager = NetworkManager()
    updated_ids = []
    for node_data in payload:
        self.validator.verify_data_correctness(node_data)
        updated_ids.append(manager._update_attrs(node_data))
    refreshed = db().query(Node).filter(
        Node.id.in_(updated_ids)).all()
    return map(self.render, refreshed)
def verify_data_correctness(cls, node):
    """Validate one node-interface payload against DB state.

    Checks, in order: the node exists; the interface count matches;
    the node's cluster has network groups; every assigned network
    belongs to those groups (or is the admin network); and every
    non-admin network group ends up assigned to some interface.

    :raises: errors.InvalidData on any violation.
    """
    db_node = db().query(Node).filter_by(id=node['id']).first()
    if not db_node:
        raise errors.InvalidData("There is no node with ID '%d' in DB" %
                                 node['id'], log_message=True)
    interfaces = node['interfaces']
    db_interfaces = db_node.interfaces
    if len(interfaces) != len(db_interfaces):
        raise errors.InvalidData(
            "Node '%d' has different amount of interfaces" %
            node['id'], log_message=True)
    # FIXIT: we should use not all networks but appropriate for this
    # node only.
    db_network_groups = db().query(NetworkGroup).filter_by(
        cluster_id=db_node.cluster_id).all()
    if not db_network_groups:
        raise errors.InvalidData("There are no networks related to"
                                 " node '%d' in DB" % node['id'],
                                 log_message=True)
    network_group_ids = set([ng.id for ng in db_network_groups])
    if db_node.cluster and db_node.cluster.net_provider == 'neutron':
        net_manager = NeutronManager()
    else:
        net_manager = NetworkManager()
    admin_ng_id = net_manager.get_admin_network_group_id()
    for iface in interfaces:
        # Py2 filter() returns a list; empty means unknown interface.
        db_iface = filter(lambda i: i.id == iface['id'], db_interfaces)
        if not db_iface:
            raise errors.InvalidData("There is no interface with ID '%d'"
                                     " for node '%d' in DB" %
                                     (iface['id'], node['id']),
                                     log_message=True)
        db_iface = db_iface[0]
        for net in iface['assigned_networks']:
            if net['id'] not in network_group_ids and not \
                    net['id'] == admin_ng_id:
                raise errors.InvalidData(
                    "Node '%d' shouldn't be connected to"
                    " network with ID '%d'" %
                    (node['id'], net['id']), log_message=True)
            elif net['id'] != admin_ng_id:
                # Mark this network group as assigned.
                network_group_ids.remove(net['id'])
    # Check if there are unassigned networks for this node.
    if network_group_ids:
        raise errors.InvalidData(
            "Too few networks to assign to node '%d'" % node['id'],
            log_message=True)
def serialize_for_cluster(cls, cluster):
    """Serialize cluster-wide network configuration.

    HA-mode clusters additionally get management/public VIPs assigned
    and included in the result.
    """
    result = {
        'net_manager': cluster.net_manager,
        'networks': [cls.serialize_network_group(ng)
                     for ng in cluster.network_groups],
    }
    if cluster.mode == 'ha':
        manager = NetworkManager()
        for net_name in ('management', 'public'):
            result['%s_vip' % net_name] = manager.assign_vip(
                cluster.id, net_name)
    return result
def get_admin_ip(cls, node):
    """Getting admin ip and assign prefix from admin network."""
    manager = NetworkManager()
    raw_ip = manager.get_admin_ips_for_interfaces(node)[
        node.admin_interface.name]
    admin_ip = IPNetwork(raw_ip)
    # Rewrite the prefix length to match the admin network's CIDR.
    admin_net = IPNetwork(manager.get_admin_network().cidr)
    admin_ip.prefixlen = admin_net.prefixlen
    return str(admin_ip)
def add_vlan_interfaces(cls, node):
    """Assign fixed_interfaces and vlan_interface.
    They should be equal.
    """
    nic = NetworkManager()._get_interface_by_network_name(
        node.id, 'fixed')
    return {
        'fixed_interface': nic.name,
        'vlan_interface': nic.name,
    }
def add_vlan_interfaces(cls, nodes):
    """Assign fixed_interfaces and vlan_interface.
    They should be equal.
    """
    manager = NetworkManager()
    for node in nodes:
        node_db = db().query(Node).get(node['uid'])
        nic = manager._get_interface_by_network_name(
            node_db.id, 'fixed')
        node['fixed_interface'] = node['vlan_interface'] = nic.name
def PUT(self, node_id):
    """:returns: Collection of JSONized Node objects.
    :http: * 200 (nodes are successfully updated)
           * 400 (invalid nodes data specified)
    """
    payload = {
        'id': node_id,
        'interfaces': self.validator.validate_json(web.data()),
    }
    self.validator.validate(payload)
    NetworkManager()._update_attrs(payload)
    node = self.get_object_or_404(Node, node_id)
    return self.render(node)['interfaces']
def __init__(self, app):
    """Build the shared test-environment fixture around *app*."""
    self.db = db()
    self.app = app
    # Instantiate a TestCase to get its assert* helpers; runTest must
    # exist on the class before instantiation, hence the stub.
    self.tester = TestCase
    self.tester.runTest = lambda a: None
    self.tester = self.tester()
    self.here = os.path.abspath(os.path.dirname(__file__))
    self.fixture_dir = os.path.join(self.here, "..", "fixtures")
    self.default_headers = {"Content-Type": "application/json"}
    # Created objects are tracked here for later cleanup/inspection.
    self.releases, self.clusters, self.nodes = [], [], []
    self.network_manager = NetworkManager()
def serialize_for_cluster(cls, cluster):
    """Serialize cluster-wide network configuration.

    In HA mode a VIP is assigned and included for every network in the
    release metadata that does not opt out via ``assign_vip: false``.
    """
    result = {
        'net_manager': cluster.net_manager,
        'networks': [cls.serialize_network_group(ng)
                     for ng in cluster.network_groups],
    }
    if cluster.is_ha_mode:
        manager = NetworkManager()
        for network in cluster.release.networks_metadata:
            if network.get("assign_vip") is False:
                continue
            vip_key = '{0}_vip'.format(network["name"])
            result[vip_key] = manager.assign_vip(
                cluster.id, network["name"])
    return result
def test_ip_range_intersection(self):
    """is_range_in_cidr: table-driven intersection checks."""
    nm = NetworkManager()
    cases = [
        (IPRange('192.168.0.0', '192.168.255.255'),
         IPNetwork('192.168.1.0/24'), True),
        (IPRange('164.174.47.1', '191.0.0.0'),
         IPNetwork('192.168.1.0/24'), False),
        (IPRange('192.168.0.0', '192.168.255.255'),
         IPRange('164.174.47.1', '191.0.0.0'), False),
        (IPNetwork('192.168.1.0/8'),
         IPNetwork('192.168.1.0/24'), True),
    ]
    for ip_range, cidr, expected in cases:
        self.assertEqual(nm.is_range_in_cidr(ip_range, cidr), expected)
def __add_vlan_interfaces(cls, nodes):
    """With VlanManager the fixed network itself is not passed to the
    orchestrator; instead each node dict gets a 'vlan_interface'
    attribute naming its fixed (private, in Fuel terms) interface.
    """
    manager = NetworkManager()
    for node in nodes:
        node_db = orm().query(Node).get(node['id'])
        # NOTE(review): passes the Node object, while the sibling
        # serializer passes node_db.id — confirm the expected argument.
        fixed_nic = manager._get_interface_by_network_name(
            node_db, 'fixed')
        node['vlan_interface'] = fixed_nic.name
def prepare_for_deployment(self):
    """Assign management/public/storage IPs to every node that is
    about to be deployed or is currently provisioning.
    """
    from nailgun.network.manager import NetworkManager
    from nailgun.task.helpers import TaskHelper
    nodes = set(TaskHelper.nodes_to_deploy(self) +
                TaskHelper.nodes_in_provisioning(self))
    TaskHelper.update_slave_nodes_fqdn(nodes)
    node_ids = sorted(n.id for n in nodes)
    manager = NetworkManager()
    if node_ids:
        for net_name in ('management', 'public', 'storage'):
            manager.assign_ips(node_ids, net_name)
def PUT(self):
    """:returns: Collection of JSONized Node objects.
    :http: * 200 (nodes are successfully updated)
           * 400 (invalid nodes data specified)
    """
    payload = self.validator.validate_collection_structure(web.data())
    manager = NetworkManager()
    updated_ids = []
    for node_data in payload:
        self.validator.verify_data_correctness(node_data)
        updated_ids.append(manager._update_attrs(node_data))
    refreshed = db().query(Node).filter(
        Node.id.in_(updated_ids)).all()
    return [self.render(n) for n in refreshed]
def render(cls, nodes, fields=None):
    """Serialize many nodes at once.

    IP addresses and networks are pre-fetched grouped by node/cluster
    so each node avoids per-node queries; nodes that fail to serialize
    are logged and skipped.
    """
    manager = NetworkManager()
    ips_by_node = manager.get_grouped_ips_by_node()
    nets_by_cluster = manager.get_networks_grouped_by_cluster()
    rendered = []
    for node in nodes:
        try:
            item = JSONHandler.render(node, fields=cls.fields)
            item['network_data'] = manager.get_node_networks_optimized(
                node,
                ips_by_node.get(node.id, []),
                nets_by_cluster.get(node.cluster_id, []))
            rendered.append(item)
        except Exception:
            logger.error(traceback.format_exc())
    return rendered
def admin_interface(self):
    """Return the first interface whose IP the NetworkManager places
    in the admin subnet; fall back (with a warning) to the node's
    first interface when none matches.

    :raises: errors.CanNotFindInterface
    """
    from nailgun.network.manager import NetworkManager
    manager = NetworkManager()
    for nic in self.interfaces:
        if manager.is_ip_belongs_to_admin_subnet(nic.ip_addr):
            return nic
    logger.warning(u'Cannot find admin interface for node '
                   'return first interface: "%s"' % self.full_name)
    return self.interfaces[0]
def admin_interface(self):
    """Return the first interface whose IP falls inside the admin
    network CIDR; fall back (with a warning) to the node's first
    interface when none matches.

    :raises: errors.CanNotFindInterface
    """
    from nailgun.network.manager import NetworkManager
    # The admin CIDR is loop-invariant; parse it once.
    admin_net = IPNetwork(NetworkManager().get_admin_network().cidr)
    for nic in self.interfaces:
        if nic.ip_addr and IPAddress(nic.ip_addr) in admin_net:
            return nic
    logger.warning(u'Cannot find admin interface for node '
                   'return first interface: "%s"' % self.full_name)
    return self.interfaces[0]