def get_common_attrs(cls, cluster):
    """Common attributes for all facts.

    Extends the base serializer's common attributes with HA-specific
    data: management/public VIPs, the name of the last controller
    (by uid order), the primary-controller assignment and the 'mp'
    mount point list.
    """
    common_attrs = super(OrchestratorHASerializer, cls).get_common_attrs(
        cluster)

    netmanager = NetworkManager()
    common_attrs['management_vip'] = netmanager.assign_vip(
        cluster.id, 'management')
    common_attrs['public_vip'] = netmanager.assign_vip(
        cluster.id, 'public')

    # uids are numeric strings; sort numerically, otherwise a plain
    # string sort orders '10' before '2' and picks a wrong
    # last_controller (sibling serializers already use int(uid)).
    sorted_nodes = sorted(
        common_attrs['nodes'], key=lambda node: int(node['uid']))

    controller_nodes = cls.filter_by_roles(
        sorted_nodes, ['controller', 'primary-controller'])
    common_attrs['last_controller'] = controller_nodes[-1]['name']

    # Assign primary controller in nodes list
    cls.set_primary_controller(common_attrs['nodes'])

    common_attrs['mp'] = [
        {'point': '1', 'weight': '1'},
        {'point': '2', 'weight': '2'}]

    return common_attrs
def get_common_attrs(cls, cluster):
    """Common attributes for all facts.

    Adds HA specifics on top of the base attributes: VIPs,
    last/primary controller bookkeeping and mount points.
    """
    common_attrs = super(
        NovaOrchestratorHASerializer, cls
    ).get_common_attrs(cluster)

    netmanager = NetworkManager()
    for vip_name in ('management', 'public'):
        common_attrs['{0}_vip'.format(vip_name)] = netmanager.assign_vip(
            cluster.id, vip_name)

    # uids are numeric strings, hence the int() sort key.
    nodes_by_uid = sorted(
        common_attrs['nodes'], key=lambda item: int(item['uid']))
    controllers = cls.filter_by_roles(
        nodes_by_uid, ['controller', 'primary-controller'])
    common_attrs['last_controller'] = controllers[-1]['name']

    # Mark the primary controller directly in the nodes list.
    cls.set_primary_controller(common_attrs['nodes'])

    common_attrs['mp'] = [{'point': '1', 'weight': '1'},
                          {'point': '2', 'weight': '2'}]

    return common_attrs
def get_common_attrs(cls, cluster):
    """Common attributes for all facts.

    Adds HA data to the base attributes: VIPs, last controller name,
    primary-controller role and mount point descriptions.
    """
    common_attrs = super(OrchestratorHASerializer, cls).get_common_attrs(
        cluster)
    netmanager = NetworkManager()
    common_attrs['management_vip'] = netmanager.assign_vip(
        cluster.id, 'management')
    common_attrs['public_vip'] = netmanager.assign_vip(
        cluster.id, 'public')
    # uids are numeric strings; compare as ints so '10' does not sort
    # before '2' (the original string sort picked a wrong last
    # controller once uids reached two digits).
    common_attrs['last_controller'] = sorted(
        common_attrs['controller_nodes'],
        key=lambda node: int(node['uid']))[-1]['name']
    # Promote the first controller-ish node to primary-controller
    # (Python 2 filter returns a list, so [0] indexing is valid here).
    first_controller = filter(
        lambda node: 'controller' in node['role'],
        common_attrs['nodes'])[0]
    first_controller['role'] = 'primary-controller'
    common_attrs['mp'] = [
        {'point': '1', 'weight': '1'},
        {'point': '2', 'weight': '2'}]
    common_attrs['mountpoints'] = '1 1\\n2 2\\n'
    return common_attrs
def serialize_for_cluster(cls, cluster):
    """Build the network-settings dict for *cluster*.

    VIP addresses are included only when the cluster runs in HA mode.
    """
    result = {
        "net_manager": cluster.net_manager,
        "networks": map(cls.serialize_network_group,
                        cluster.network_groups),
    }
    if cluster.is_ha_mode:
        net_manager = NetworkManager()
        for net_name in ("management", "public"):
            result["%s_vip" % net_name] = net_manager.assign_vip(
                cluster.id, net_name)
    return result
def serialize_for_cluster(cls, cluster):
    """Serialize the cluster's network settings.

    In 'ha' mode the management and public VIPs are added as well.
    """
    result = {
        'net_manager': cluster.net_manager,
        'networks': map(cls.serialize_network_group,
                        cluster.network_groups),
    }
    if cluster.mode == 'ha':
        net_manager = NetworkManager()
        result['management_vip'] = net_manager.assign_vip(
            cluster.id, 'management')
        result['public_vip'] = net_manager.assign_vip(
            cluster.id, 'public')
    return result
class TestNetworkConfigurationHandlerHAMode(BaseIntegrationTest):
    """Checks that the handler exposes HA VIP addresses."""

    def setUp(self):
        super(TestNetworkConfigurationHandlerHAMode, self).setUp()
        created = self.env.create_cluster(api=True, mode="ha_compact")
        self.cluster = self.db.query(Cluster).get(created["id"])
        self.net_manager = NetworkManager()

    def test_returns_management_vip_and_public_vip(self):
        url = reverse("NovaNetworkConfigurationHandler",
                      kwargs={"cluster_id": self.cluster.id})
        response = self.app.get(url, headers=self.default_headers)
        resp = json.loads(response.body)
        for net_name in ("management", "public"):
            self.assertEquals(
                resp["%s_vip" % net_name],
                self.net_manager.assign_vip(self.cluster.id, net_name))
def serialize_for_cluster(cls, cluster):
    """Serialize network settings; VIPs only appear in 'ha' mode."""
    result = {}
    result['net_manager'] = cluster.net_manager
    result['networks'] = map(cls.serialize_network_group,
                             cluster.network_groups)
    if cluster.mode == 'ha':
        net_manager = NetworkManager()
        for vip_name in ('management', 'public'):
            result['{0}_vip'.format(vip_name)] = net_manager.assign_vip(
                cluster.id, vip_name)
    return result
def get_common_attrs(cls, cluster):
    """Common attributes for all facts (HA flavour).

    Allocates a VIP for every network group that asks for one,
    fills in mount points and the last/primary controller data.
    """
    common_attrs = super(DeploymentHASerializer,
                         cls).get_common_attrs(cluster)

    for ng in cluster.network_groups:
        if not ng.meta.get("assign_vip"):
            continue
        common_attrs[ng.name + '_vip'] = NetworkManager.assign_vip(
            cluster.id, ng.name)

    common_attrs['mp'] = [{'point': '1', 'weight': '1'},
                          {'point': '2', 'weight': '2'}]

    # uids are numeric strings, hence the int() key.
    by_uid = sorted(common_attrs['nodes'],
                    key=lambda item: int(item['uid']))
    controllers = cls.filter_by_roles(
        by_uid, ['controller', 'primary-controller'])
    common_attrs['last_controller'] = controllers[-1]['name']

    # Mark the primary controller directly in the nodes list.
    cls.set_primary_controller(common_attrs['nodes'])
    return common_attrs
def serialize_for_cluster(cls, cluster):
    """Serialize network settings, including the admin network.

    Optionally adds DNS nameservers, and in HA mode a VIP for every
    nova-network metadata entry that does not opt out.
    """
    net_manager = NetworkManager()

    result = {}
    result['net_manager'] = cluster.net_manager
    result['networks'] = [cls.serialize_network_group(group)
                          for group in cluster.network_groups]
    result['networks'].append(
        cls.serialize_network_group(
            net_manager.get_admin_network_group()))

    if cluster.dns_nameservers:
        result['dns_nameservers'] = {
            "nameservers": cluster.dns_nameservers
        }

    if cluster.is_ha_mode:
        nova_meta = cluster.release.networks_metadata["nova_network"]
        for network in nova_meta["networks"]:
            if network.get("assign_vip") is False:
                continue
            name = network["name"]
            result['{0}_vip'.format(name)] = net_manager.assign_vip(
                cluster.id, name)
    return result
def get_common_attrs(cls, cluster):
    """Common attributes for all facts."""
    common_attrs = super(
        DeploymentHASerializer, cls).get_common_attrs(cluster)

    # VIPs only for network groups whose metadata requests one.
    vip_groups = [ng for ng in cluster.network_groups
                  if ng.meta.get("assign_vip")]
    for ng in vip_groups:
        common_attrs['%s_vip' % ng.name] = NetworkManager.assign_vip(
            cluster.id, ng.name)

    common_attrs['mp'] = [
        {'point': '1', 'weight': '1'},
        {'point': '2', 'weight': '2'}]

    ordered = sorted(common_attrs['nodes'],
                     key=lambda node: int(node['uid']))
    controller_nodes = cls.filter_by_roles(
        ordered, ['controller', 'primary-controller'])
    common_attrs['last_controller'] = controller_nodes[-1]['name']

    # Assign primary controller in nodes list
    cls.set_primary_controller(common_attrs['nodes'])
    return common_attrs
class TestNovaNetworkConfigurationHandlerHA(BaseIntegrationTest):
    """Verify the nova-network handler returns HA VIP addresses."""

    def setUp(self):
        super(TestNovaNetworkConfigurationHandlerHA, self).setUp()
        created = self.env.create_cluster(api=True, mode='ha_compact')
        self.cluster = self.db.query(Cluster).get(created['id'])
        self.net_manager = NetworkManager()

    def test_returns_management_vip_and_public_vip(self):
        body = self.env.nova_networks_get(self.cluster.id).body
        resp = json.loads(body)
        for net_name in ('management', 'public'):
            expected = self.net_manager.assign_vip(
                self.cluster.id, net_name)
            self.assertEquals(resp['%s_vip' % net_name], expected)
class TestNetworkConfigurationHandlerHAMode(BaseIntegrationTest):
    """VIP exposure through NetworkConfigurationHandler in HA mode."""

    def setUp(self):
        super(TestNetworkConfigurationHandlerHAMode, self).setUp()
        created = self.env.create_cluster(api=True, mode='ha_compact')
        self.cluster = self.db.query(Cluster).get(created['id'])
        self.net_manager = NetworkManager()

    def test_returns_management_vip_and_public_vip(self):
        url = reverse('NetworkConfigurationHandler',
                      kwargs={'cluster_id': self.cluster.id})
        raw_body = self.app.get(url, headers=self.default_headers).body
        resp = json.loads(raw_body)
        for net_name in ('management', 'public'):
            self.assertEquals(
                resp['%s_vip' % net_name],
                self.net_manager.assign_vip(self.cluster.id, net_name))
class TestNetworkConfigurationHandlerHAMode(BaseIntegrationTest):
    """VIPs returned by NovaNetworkConfigurationHandler in HA mode."""

    def setUp(self):
        super(TestNetworkConfigurationHandlerHAMode, self).setUp()
        created = self.env.create_cluster(api=True, mode='ha_compact')
        self.cluster = self.db.query(Cluster).get(created['id'])
        self.net_manager = NetworkManager()

    def test_returns_management_vip_and_public_vip(self):
        url = reverse('NovaNetworkConfigurationHandler',
                      kwargs={'cluster_id': self.cluster.id})
        response = self.app.get(url, headers=self.default_headers)
        resp = json.loads(response.body)
        expected_mgmt = self.net_manager.assign_vip(
            self.cluster.id, 'management')
        self.assertEquals(resp['management_vip'], expected_mgmt)
        expected_pub = self.net_manager.assign_vip(
            self.cluster.id, 'public')
        self.assertEquals(resp['public_vip'], expected_pub)
def serialize_for_cluster(cls, cluster):
    """Serialize the cluster's network configuration.

    In HA mode a VIP is assigned for every release network metadata
    entry unless it explicitly opts out with assign_vip=False.
    """
    result = {
        'net_manager': cluster.net_manager,
        'networks': map(cls.serialize_network_group,
                        cluster.network_groups),
    }
    if cluster.is_ha_mode:
        net_manager = NetworkManager()
        for network in cluster.release.networks_metadata:
            if network.get("assign_vip") is False:
                continue
            vip_key = '{0}_vip'.format(network["name"])
            result[vip_key] = net_manager.assign_vip(
                cluster.id, network["name"])
    return result
def serialize_for_cluster(cls, cluster):
    """Serialize network settings of *cluster* into a plain dict."""
    result = {}
    result['net_manager'] = cluster.net_manager
    result['networks'] = [cls.serialize_network_group(group)
                          for group in cluster.network_groups]
    if cluster.is_ha_mode:
        manager = NetworkManager()
        # Metadata entries may opt out of a VIP with assign_vip=False.
        vip_networks = [net for net in cluster.release.networks_metadata
                        if net.get("assign_vip") is not False]
        for network in vip_networks:
            result['%s_vip' % network["name"]] = manager.assign_vip(
                cluster.id, network["name"])
    return result
def serialize_for_cluster(cls, cluster):
    """Serialize nova-network settings, admin network included."""
    result = {}
    result['net_manager'] = cluster.net_manager
    result['networks'] = map(cls.serialize_network_group,
                             cluster.network_groups)

    net_manager = NetworkManager()
    admin_group = net_manager.get_admin_network_group()
    result['networks'].append(cls.serialize_network_group(admin_group))

    if cluster.dns_nameservers:
        result['dns_nameservers'] = {
            "nameservers": cluster.dns_nameservers
        }

    if cluster.is_ha_mode:
        nova_meta = cluster.release.networks_metadata["nova_network"]
        for network in nova_meta["networks"]:
            if network.get("assign_vip") is False:
                continue
            name = network["name"]
            result['{0}_vip'.format(name)] = net_manager.assign_vip(
                cluster.id, name)
    return result
def message(cls, task):
    """Build the 'deploy' RPC message for the orchestrator.

    Assigns IPs to the nodes being deployed, marks them provisioned,
    collects cluster attributes (network ranges, network manager
    parameters, HA VIPs) and wraps everything into the message dict.
    """
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    task_uuid = task.uuid  # NOTE(review): unused local — candidate for removal
    cluster_id = task.cluster.id
    netmanager = NetworkManager()

    nodes = TaskHelper.nodes_to_deploy(task.cluster)

    logger.info("Associated FQDNs to nodes: %s" %
                ', '.join([n.fqdn for n in nodes]))

    nodes_ids = [n.id for n in nodes]
    if nodes_ids:
        logger.info("Assigning IP addresses to nodes..")
        netmanager.assign_ips(nodes_ids, "management")
        netmanager.assign_ips(nodes_ids, "public")
        netmanager.assign_ips(nodes_ids, "storage")

    nodes_with_attrs = []
    for n in nodes:
        n.pending_addition = False
        # Nodes being redeployed go back to 'provisioned' with a
        # fresh progress counter before the deploy starts.
        if n.status in ('ready', 'deploying'):
            n.status = 'provisioned'
            n.progress = 0
        # Committed per node so state is persisted incrementally.
        orm().add(n)
        orm().commit()
        nodes_with_attrs.append(cls.__format_node_for_naily(n))

    cluster_attrs = task.cluster.attributes.merged_attrs_values()
    cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

    # NOTE(review): nets_db is queried but never used below —
    # candidate for removal.
    nets_db = orm().query(Network).join(NetworkGroup).\
        filter(NetworkGroup.cluster_id == cluster_id).all()

    ng_db = orm().query(NetworkGroup).filter_by(
        cluster_id=cluster_id).all()
    for net in ng_db:
        cluster_attrs[net.name + '_network_range'] = net.cidr

    cluster_attrs['network_manager'] = task.cluster.net_manager
    if cluster_attrs['network_manager'] == 'VlanManager':
        fixed_net = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).filter_by(name='fixed').first()
        cluster_attrs['network_size'] = fixed_net.network_size
        cluster_attrs['num_networks'] = fixed_net.amount
        cluster_attrs['vlan_start'] = fixed_net.vlan_start
        cls.__add_vlan_interfaces(nodes_with_attrs)

    if task.cluster.mode == 'ha':
        logger.info("HA mode chosen, creating VIP addresses for it..")
        cluster_attrs['management_vip'] = netmanager.assign_vip(
            cluster_id, "management")
        cluster_attrs['public_vip'] = netmanager.assign_vip(
            cluster_id, "public")

    cluster_attrs['deployment_mode'] = task.cluster.mode
    cluster_attrs['deployment_id'] = cluster_id

    message = {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'nodes': nodes_with_attrs,
            'attributes': cluster_attrs
        }
    }

    return message
def message(cls, task):
    """Build the 'deploy' RPC message for the orchestrator.

    Assigns IPs to nodes being deployed, resets their status,
    gathers cluster attributes (network ranges with a special case
    for 'floating' and 'public', network manager parameters, HA VIPs)
    and wraps everything into the message dict.
    """
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    task_uuid = task.uuid  # NOTE(review): unused local — candidate for removal
    cluster_id = task.cluster.id
    netmanager = NetworkManager()

    nodes = TaskHelper.nodes_to_deploy(task.cluster)

    logger.info("Associated FQDNs to nodes: %s" %
                ', '.join([n.fqdn for n in nodes]))

    nodes_ids = [n.id for n in nodes]
    if nodes_ids:
        logger.info("Assigning IP addresses to nodes..")
        netmanager.assign_ips(nodes_ids, "management")
        netmanager.assign_ips(nodes_ids, "public")
        netmanager.assign_ips(nodes_ids, "storage")

    nodes_with_attrs = []
    for n in nodes:
        n.pending_addition = False
        # Nodes being redeployed return to 'provisioned' with a
        # reset progress counter.
        if n.status in ('ready', 'deploying'):
            n.status = 'provisioned'
            n.progress = 0
        orm().add(n)
        orm().commit()
        nodes_with_attrs.append(cls.__format_node_for_naily(n))

    cluster_attrs = task.cluster.attributes.merged_attrs_values()
    cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

    # NOTE(review): nets_db is queried but never used below —
    # candidate for removal.
    nets_db = orm().query(Network).join(NetworkGroup).\
        filter(NetworkGroup.cluster_id == cluster_id).all()

    ng_db = orm().query(NetworkGroup).filter_by(
        cluster_id=cluster_id).all()
    for net in ng_db:
        net_name = net.name + '_network_range'
        if net.name == 'floating':
            # Floating ranges are expanded to explicit addresses.
            cluster_attrs[net_name] = \
                cls.__get_ip_addresses_in_ranges(net)
        elif net.name == 'public':
            # We shouldn't pass public_network_range attribute
            continue
        else:
            cluster_attrs[net_name] = net.cidr

    cluster_attrs['network_manager'] = task.cluster.net_manager

    fixed_net = orm().query(NetworkGroup).filter_by(
        cluster_id=cluster_id).filter_by(name='fixed').first()
    # network_size is required for all managers, otherwise
    # puppet will use default (255)
    cluster_attrs['network_size'] = fixed_net.network_size
    if cluster_attrs['network_manager'] == 'VlanManager':
        cluster_attrs['num_networks'] = fixed_net.amount
        cluster_attrs['vlan_start'] = fixed_net.vlan_start
        cls.__add_vlan_interfaces(nodes_with_attrs)

    if task.cluster.mode == 'ha':
        logger.info("HA mode chosen, creating VIP addresses for it..")
        cluster_attrs['management_vip'] = netmanager.assign_vip(
            cluster_id, "management")
        cluster_attrs['public_vip'] = netmanager.assign_vip(
            cluster_id, "public")

    cluster_attrs['deployment_mode'] = task.cluster.mode
    cluster_attrs['deployment_id'] = cluster_id

    message = {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'nodes': nodes_with_attrs,
            'attributes': cluster_attrs
        }
    }

    return message
def message(cls, task):
    """Build the 'deploy' RPC message for the orchestrator.

    Passes every non-deleted node of the cluster (not only the ones
    being redeployed), assigns IPs to the deploy set, collects
    cluster attributes (network ranges, novanetwork parameters,
    HA VIPs) and wraps everything into the message dict.
    """
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    cluster_id = task.cluster.id
    netmanager = NetworkManager()

    nodes = TaskHelper.nodes_to_deploy(task.cluster)

    logger.info("Associated FQDNs to nodes: %s" %
                ', '.join([n.fqdn for n in nodes]))

    nodes_ids = [n.id for n in nodes]
    if nodes_ids:
        logger.info("Assigning IP addresses to nodes..")
        netmanager.assign_ips(nodes_ids, "management")
        netmanager.assign_ips(nodes_ids, "public")
        netmanager.assign_ips(nodes_ids, "storage")

    nodes_with_attrs = []
    # FIXME(mihgen): We need to pass all other nodes, so astute
    # can know about all the env, not only about added nodes.
    for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
            Node.id):
        # However, we must not pass nodes which are set to be deleted.
        if n.pending_deletion:
            continue
        if n.id in nodes_ids:  # It's node which we need to redeploy
            n.pending_addition = False
            # BUGFIX: the original check was `n.status in ('deploying')`
            # which, lacking a comma, tested substring membership in the
            # string 'deploying'; equality is what was intended.
            if n.status == 'deploying':
                n.status = 'provisioned'
                n.progress = 0
            db().add(n)
            db().commit()
        nodes_with_attrs.append(cls.__format_node_for_naily(n))

    cluster_attrs = task.cluster.attributes.merged_attrs_values()
    cluster_attrs['master_ip'] = settings.MASTER_IP
    cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

    # Note: a dead `nets_db` query (result never used) was removed here.
    ng_db = db().query(NetworkGroup).filter_by(cluster_id=cluster_id).all()
    for net in ng_db:
        net_name = net.name + '_network_range'
        if net.name == 'floating':
            cluster_attrs[net_name] = \
                cls.__get_ip_ranges_first_last(net)
        elif net.name == 'public':
            # We shouldn't pass public_network_range attribute
            continue
        else:
            cluster_attrs[net_name] = net.cidr

    net_params = {}
    net_params['network_manager'] = task.cluster.net_manager

    fixed_net = db().query(NetworkGroup).filter_by(
        cluster_id=cluster_id).filter_by(name='fixed').first()
    # network_size is required for all managers, otherwise
    # puppet will use default (255)
    net_params['network_size'] = fixed_net.network_size
    if net_params['network_manager'] == 'VlanManager':
        net_params['num_networks'] = fixed_net.amount
        net_params['vlan_start'] = fixed_net.vlan_start
        cls.__add_vlan_interfaces(nodes_with_attrs)

    cluster_attrs['novanetwork_parameters'] = net_params

    if task.cluster.mode == 'ha':
        logger.info("HA mode chosen, creating VIP addresses for it..")
        cluster_attrs['management_vip'] = netmanager.assign_vip(
            cluster_id, "management")
        cluster_attrs['public_vip'] = netmanager.assign_vip(
            cluster_id, "public")

    cluster_attrs['deployment_mode'] = task.cluster.mode
    cluster_attrs['deployment_id'] = cluster_id

    message = {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'nodes': nodes_with_attrs,
            'attributes': cluster_attrs
        }
    }
    return message
def message(cls, task):
    """Build the 'deploy' RPC message for the orchestrator.

    Passes every non-deleted node of the cluster (so astute sees the
    whole environment), assigns IPs to the deploy set, collects
    cluster attributes (network ranges, novanetwork parameters,
    HA VIPs) and wraps everything into the message dict.
    """
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    cluster_id = task.cluster.id
    netmanager = NetworkManager()

    nodes = TaskHelper.nodes_to_deploy(task.cluster)

    logger.info("Associated FQDNs to nodes: %s" %
                ', '.join([n.fqdn for n in nodes]))

    nodes_ids = [n.id for n in nodes]
    if nodes_ids:
        logger.info("Assigning IP addresses to nodes..")
        netmanager.assign_ips(nodes_ids, "management")
        netmanager.assign_ips(nodes_ids, "public")
        netmanager.assign_ips(nodes_ids, "storage")

    nodes_with_attrs = []
    # FIXME(mihgen): We need to pass all other nodes, so astute
    # can know about all the env, not only about added nodes.
    for n in db().query(Node).filter_by(
            cluster=task.cluster
    ).order_by(Node.id):
        # However, we must not pass nodes which are set to be deleted.
        if n.pending_deletion:
            continue
        if n.id in nodes_ids:  # It's node which we need to redeploy
            n.pending_addition = False
            # BUGFIX: the original check was `n.status in ('deploying')`
            # which, lacking a comma, tested substring membership in the
            # string 'deploying'; equality is what was intended.
            if n.status == 'deploying':
                n.status = 'provisioned'
                n.progress = 0
            db().add(n)
            db().commit()
        nodes_with_attrs.append(cls.__format_node_for_naily(n))

    cluster_attrs = task.cluster.attributes.merged_attrs_values()
    cluster_attrs['master_ip'] = settings.MASTER_IP
    cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

    # Note: a dead `nets_db` query (result never used) was removed here.
    ng_db = db().query(NetworkGroup).filter_by(
        cluster_id=cluster_id).all()
    for net in ng_db:
        net_name = net.name + '_network_range'
        if net.name == 'floating':
            cluster_attrs[net_name] = \
                cls.__get_ip_ranges_first_last(net)
        elif net.name == 'public':
            # We shouldn't pass public_network_range attribute
            continue
        else:
            cluster_attrs[net_name] = net.cidr

    net_params = {}
    net_params['network_manager'] = task.cluster.net_manager

    fixed_net = db().query(NetworkGroup).filter_by(
        cluster_id=cluster_id).filter_by(name='fixed').first()
    # network_size is required for all managers, otherwise
    # puppet will use default (255)
    net_params['network_size'] = fixed_net.network_size
    if net_params['network_manager'] == 'VlanManager':
        net_params['num_networks'] = fixed_net.amount
        net_params['vlan_start'] = fixed_net.vlan_start
        cls.__add_vlan_interfaces(nodes_with_attrs)

    cluster_attrs['novanetwork_parameters'] = net_params

    if task.cluster.mode == 'ha':
        logger.info("HA mode chosen, creating VIP addresses for it..")
        cluster_attrs['management_vip'] = netmanager.assign_vip(
            cluster_id, "management")
        cluster_attrs['public_vip'] = netmanager.assign_vip(
            cluster_id, "public")

    cluster_attrs['deployment_mode'] = task.cluster.mode
    cluster_attrs['deployment_id'] = cluster_id

    message = {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'nodes': nodes_with_attrs,
            'attributes': cluster_attrs
        }
    }
    return message