def cluster_config_set_update(self, req, config_set_meta):
    """Push config sets to a cluster via the 'clushshell' backend.

    Dispatch order: an explicit role-name list, an explicit host-id
    list, or (by default) every role that belongs to the cluster.

    :param req: the WSGI Request object
    :param config_set_meta: mapping holding 'cluster' plus optionally
        'role' (role names) or 'host_id' (host ids)
    :returns: {'config_set': {'status': ...}} on success
    :raises HTTPNotFound: cluster key missing or a named role not found
    :raises HTTPBadRequest: registry reported invalid input
    """
    # Guard clause: refuse early when no cluster was supplied.
    if 'cluster' not in config_set_meta:
        msg = "the cluster is not exist"
        LOG.error(msg)
        raise HTTPNotFound(msg)

    orig_cluster = config_set_meta['cluster']
    self._raise_404_if_cluster_deleted(req, orig_cluster)
    backend = manager.configBackend('clushshell', req)
    try:
        if config_set_meta.get('role', None):
            role_id_list = self._raise_404_if_role_exist(
                req, config_set_meta)
            # Every requested role name must have resolved to an id.
            if len(role_id_list) != len(config_set_meta['role']):
                msg = "the role is not exist"
                LOG.error(msg)
                raise HTTPNotFound(msg)
            backend.push_config_by_roles(role_id_list)
        elif config_set_meta.get('host_id'):
            backend.push_config_by_hosts(config_set_meta['host_id'])
        else:
            # No explicit target: push to all roles of this cluster.
            all_roles = registry.get_roles_detail(req.context)
            cluster_role_ids = [
                role['id'] for role in all_roles
                if role['cluster_id'] == config_set_meta['cluster']
            ]
            backend.push_config_by_roles(cluster_role_ids)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return {'config_set': {"status": "config successful"}}
def detail(self, req):
    """
    Returns detailed information for all available roles

    :param req: The WSGI/Webob Request object
    :retval The response body is a mapping of the following form::

        {'roles': [
            {'id': <ID>,
             'name': <NAME>,
             'description': <DESCRIPTION>,
             'created_at': <TIMESTAMP>,
             'updated_at': <TIMESTAMP>,
             'deleted_at': <TIMESTAMP>|<NONE>,}, ...
        ]}
    """
    self._enforce(req, 'get_roles')
    params = self._get_query_params(req)
    filters = params.get('filters', None)
    # Guard against filters being absent/None: the original
    # "'cluster_id' in filters" raises TypeError on None.
    if filters and 'cluster_id' in filters:
        cluster_id = filters['cluster_id']
        self._raise_404_if_cluster_deleted(req, cluster_id)
    try:
        roles = registry.get_roles_detail(req.context, **params)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return dict(roles=roles)
def get_cluster_roles_detail(req, cluster_id):
    """Return the roles registered for the given cluster.

    :param req: the WSGI Request object
    :param cluster_id: id of the cluster whose roles are wanted
    :raises HTTPBadRequest: registry reported invalid input
    """
    try:
        return registry.get_roles_detail(req.context,
                                         **{'cluster_id': cluster_id})
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
def cluster_config_set_update(self, req, config_set_meta):
    """Push config sets per role via per-role 'clushshell' backends.

    :param req: the WSGI Request object
    :param config_set_meta: mapping holding 'cluster' and optionally
        'role' (a serialized list of role names)
    :returns: {'config_set': {'status': ...}} on success
    :raises HTTPNotFound: cluster key missing or a named role not found
    :raises HTTPBadRequest: registry reported invalid input
    """
    import ast  # local import: literal_eval replaces unsafe eval below

    # was config_set_meta.has_key(...): dict.has_key() was removed in
    # Python 3; the 'in' operator works on both 2 and 3.
    if 'cluster' in config_set_meta:
        orig_cluster = str(config_set_meta['cluster'])
        self._raise_404_if_cluster_deleted(req, orig_cluster)
        try:
            if config_set_meta.get('role', None):
                role_id_list = self._raise_404_if_role_exist(
                    req, config_set_meta)
                # literal_eval safely parses the serialized role-name
                # list; eval() would execute arbitrary request-supplied
                # code.
                role_names = ast.literal_eval(config_set_meta['role'])
                if len(role_id_list) == len(role_names):
                    for role_id in role_id_list:
                        backend = manager.configBackend('clushshell',
                                                        req, role_id)
                        backend.push_config()
                else:
                    msg = "the role is not exist"
                    LOG.error(msg)
                    raise HTTPNotFound(msg)
            else:
                # No role restriction: push to every role of the cluster.
                roles = registry.get_roles_detail(req.context)
                for role in roles:
                    if role['cluster_id'] == config_set_meta['cluster']:
                        backend = manager.configBackend('clushshell',
                                                        req, role['id'])
                        backend.push_config()
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        config_status = {"status": "config successful"}
        return {'config_set': config_status}
    else:
        msg = "the cluster is not exist"
        LOG.error(msg)
        raise HTTPNotFound(msg)
def enable_neutron_backend(req, cluster_id, kolla_config):
    """Write OpenDaylight settings into kolla's globals.yml when an
    'opendaylight' neutron backend is attached to a CONTROLLER_LB role
    of the given cluster.

    :param req: the WSGI Request object
    :param cluster_id: cluster whose roles/backends are inspected
    :param kolla_config: kolla config mapping (unused by this path)
    """
    params = {'cluster_id': cluster_id}
    roles = registry.get_roles_detail(req.context, **params)
    all_neutron_backends = registry.list_neutron_backend_metadata(
        req.context, **params)
    for role in roles:
        # Only the load-balanced controller role carries ODL config.
        if role['name'] != 'CONTROLLER_LB':
            continue
        for neutron_backend in all_neutron_backends:
            if (neutron_backend['neutron_backends_type'] == 'opendaylight'
                    and neutron_backend['role_id'] == role['id']):
                opendaylight_config = {
                    'enable_opendaylight': "yes",
                    'neutron_plugin_agent': "opendaylight",
                    'opendaylight_mechanism_driver': "opendaylight_v2",
                    'opendaylight_l3_service_plugin': "odl-router_v2",
                    'enable_opendaylight_l3': "yes",
                    'enable_opendaylight_qos': "no",
                    'enable_opendaylight_legacy_netvirt_conntrack': "no",
                    'opendaylight_features':
                        "odl-dlux-core,odl-dluxapps-applications,"
                        "odl-mdsal-apidocs,odl-netvirt-openstack",
                    'opendaylight_restconf_port': "8088",
                    'opendaylight_restconf_port_backup': "8182",
                    'opendaylight_haproxy_restconf_port': "8087",
                    'opendaylight_haproxy_restconf_port_backup': "8181"}
                # An L2-only backend must not enable the L3 plugin.
                if neutron_backend['enable_l2_or_l3'] == 'l2':
                    opendaylight_config['enable_opendaylight_l3'] = 'no'
                update_kolla_globals_yml(opendaylight_config)
def check_template_role_name_repetition(self, req, role_name):
    """Raise 403 when role_name (case-insensitively) collides with an
    existing template role.

    :param req: the WSGI Request object
    :param role_name: candidate role name to validate
    :raises HTTPForbidden: a template role with that name exists
    """
    all_roles = registry.get_roles_detail(req.context)
    # Template roles are those not bound to any cluster; use the
    # identity test 'is None' instead of '== None' (PEP 8; avoids
    # surprises from custom __eq__).
    template_roles = [role for role in all_roles
                      if role['cluster_id'] is None]
    template_roles_name = [role['name'].lower()
                           for role in template_roles]
    if role_name.lower() in template_roles_name:
        msg = _("The role %s has already been in the the template role."
                % role_name)
        LOG.debug(msg)
        raise HTTPForbidden(msg)
def check_cluster_role_name_repetition(self, req, role_name, cluster_id):
    """Raise 403 when role_name (case-insensitively) already exists in
    the given cluster.

    :param req: the WSGI Request object
    :param role_name: candidate role name to validate
    :param cluster_id: cluster to check against
    :raises HTTPForbidden: the cluster already has a role by that name
    """
    existing_roles = registry.get_roles_detail(req.context)
    names_in_cluster = {
        role['name'].lower()
        for role in existing_roles
        if role['cluster_id'] == cluster_id
    }
    if role_name.lower() in names_in_cluster:
        msg = _("The role %s has already been in the cluster %s!"
                % (role_name, cluster_id))
        LOG.debug(msg)
        raise HTTPForbidden(msg)
def get_host_disk_db_glance_nova_size(self, req, host_id, cluster_id):
    '''
    Collect a host's non-OS disk size plus the db/glance/nova LV sizes
    demanded by the roles it carries in the given cluster.

    return :
    host_disk_db_glance_nova_size['disk_size'] = 1024000
    host_disk_db_glance_nova_size['db_lv_size'] = 1011
    host_disk_db_glance_nova_size['glance_lv_size'] = 1011
    host_disk_db_glance_nova_size['nova_lv_size'] = 1011
    '''
    host_disk_db_glance_nova_size = dict()
    db_lv_size = list()
    glance_lv_size = list()
    nova_lv_size = list()
    host_info = self.get_host_meta_or_404(req, host_id)
    if host_info:
        # was host_info.has_key(...): dict.has_key() was removed in
        # Python 3; 'in' works on both 2 and 3.
        if 'deleted' in host_info and host_info['deleted']:
            msg = _("Node with identifier %s has been deleted.") % \
                host_info['id']
            LOG.debug(msg)
            raise HTTPNotFound(msg)
        # get host disk infomation
        host_disk = self._get_host_disk_except_os_disk_by_info(host_info)
        host_disk_db_glance_nova_size['disk_size'] = host_disk
        # get role_host db/glance/nova infomation
        cluster_info = self.get_cluster_meta_or_404(req, cluster_id)
        if 'cluster' in host_info:
            # host with cluster
            if host_info['cluster'] != cluster_info['name']:
                # type(host_info['cluster']) = list,
                # type(cluster_info['name']) = str
                msg = _("Role and hosts belong to different cluster.")
                LOG.debug(msg)
                raise HTTPNotFound(msg)
            else:
                all_roles = registry.get_roles_detail(req.context)
                # roles infomation saved in cluster_roles
                cluster_roles = [role for role in all_roles
                                 if role['cluster_id'] == cluster_id]
                if 'role' in host_info and host_info['role']:
                    # host with role
                    for role in cluster_roles:
                        if role['name'] in host_info['role'] and \
                                cluster_roles:
                            db_lv_size.append(
                                role.get('db_lv_size', None))
                            glance_lv_size.append(
                                role.get('glance_lv_size', None))
                            nova_lv_size.append(
                                role.get('nova_lv_size', None))
    # Empty lists mean no sizing roles were found (host without
    # cluster/role); fall back to 0 for each LV size.
    if db_lv_size:
        host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size)
    else:
        # host without cluster
        host_disk_db_glance_nova_size['db_lv_size'] = 0
    if glance_lv_size:
        host_disk_db_glance_nova_size['glance_lv_size'] = \
            max(glance_lv_size)
    else:
        host_disk_db_glance_nova_size['glance_lv_size'] = 0
    if nova_lv_size:
        host_disk_db_glance_nova_size['nova_lv_size'] = max(nova_lv_size)
    else:
        host_disk_db_glance_nova_size['nova_lv_size'] = 0
    LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s'
             % (host_id, host_disk_db_glance_nova_size))
    return host_disk_db_glance_nova_size
def get_cluster_hosts_config(req, cluster_id):
    """Build the per-host deployment config list for a cluster.

    For every node of the cluster: apply role-derived LV sizes
    (glance/db/nova/mongodb), translate bond-mode names, and — for
    hosts still being installed — locate the DHCP deployment interface
    and resolve the network plan.

    :param req: the WSGI Request object
    :param cluster_id: cluster whose hosts are assembled
    :returns: list of host config dicts, interfaces sorted by PCI
    :raises HTTPBadRequest: registry reported invalid input
    :raises exception.InvalidNetworkConfig: zero or multiple DHCP
        interfaces on an installing host
    """
    params = dict(limit=1000000)
    try:
        cluster_data = registry.get_cluster_metadata(req.context,
                                                     cluster_id)
        networks = registry.get_networks_detail(req.context, cluster_id)
        all_roles = registry.get_roles_detail(req.context)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    roles = [role for role in all_roles
             if role['cluster_id'] == cluster_id]
    all_hosts_ids = cluster_data['nodes']
    hosts_config = []
    for host_id in all_hosts_ids:
        host_detail = daisy_cmn.get_host_detail(req, host_id)
        role_host_db_lv_size_lists = list()
        # was host_detail.has_key(...): dict.has_key() was removed in
        # Python 3; 'in' works on both 2 and 3.
        if 'role' in host_detail and host_detail['role']:
            host_roles = host_detail['role']
            for role in roles:
                if role['name'] in host_detail['role'] and \
                        role['glance_lv_size']:
                    host_detail['glance_lv_size'] = role['glance_lv_size']
                if role.get('db_lv_size', None) and host_roles and \
                        role['name'] in host_roles:
                    role_host_db_lv_size_lists.append(role['db_lv_size'])
                if role['name'] == 'COMPUTER' and \
                        role['name'] in host_detail['role'] and \
                        role['nova_lv_size']:
                    host_detail['nova_lv_size'] = role['nova_lv_size']
                service_disks = tecs_cmn.get_service_disk_list(
                    req, {'role_id': role['id']})
                for service_disk in service_disks:
                    if service_disk['disk_location'] == 'local' and \
                            service_disk['service'] == 'mongodb':
                        host_detail['mongodb_lv_size'] = \
                            service_disk['size']
                        break
            if role_host_db_lv_size_lists:
                host_detail['db_lv_size'] = max(role_host_db_lv_size_lists)
            else:
                host_detail['db_lv_size'] = 0
        for interface in host_detail['interfaces']:
            # Map symbolic bond modes to their Linux kernel names.
            if interface['type'] == 'bond' and \
                    interface['mode'] in LINUX_BOND_MODE.keys():
                interface['mode'] = LINUX_BOND_MODE[interface['mode']]
        if (host_detail['os_status'] == host_os_status['INIT'] or
                host_detail['os_status'] == host_os_status['INSTALLING'] or
                host_detail['os_status'] ==
                host_os_status['INSTALL_FAILED']):
            host_dhcp_interface = [hi for hi in host_detail['interfaces']
                                   if hi['is_deployment']]
            if not host_dhcp_interface:
                msg = "cann't find dhcp interface on host %s" % \
                    host_detail['id']
                raise exception.InvalidNetworkConfig(msg)
            if len(host_dhcp_interface) > 1:
                msg = "dhcp interface should only has one on host %s" % \
                    host_detail['id']
                raise exception.InvalidNetworkConfig(msg)
            host_config_detail = copy.deepcopy(host_detail)
            host_config = _get_network_plat(req, host_config_detail,
                                            networks,
                                            host_dhcp_interface[0]['mac'])
            hosts_config.append(
                tecs_cmn.sort_interfaces_by_pci(host_config))
    return hosts_config
def check_template_role_name_repetition(self, req, role_name):
    """Reject a role name that an existing template role already uses
    (comparison is case-insensitive).

    :param req: the WSGI Request object
    :param role_name: candidate role name to validate
    :raises HTTPForbidden: a template role with that name exists
    """
    taken_names = []
    for role in registry.get_roles_detail(req.context):
        # Template roles carry no cluster binding.
        if role['cluster_id'] is None:
            taken_names.append(role['name'].lower())
    if role_name.lower() in taken_names:
        msg = _("The role %s has already been in the the template role."
                % role_name)
        LOG.debug(msg)
        raise HTTPForbidden(msg)
def _raise_404_if_role_exist(self, req, config_meta):
    """Look up the id of the role named in config_meta within the
    cluster named in config_meta.

    :returns: the role id, or "" when no such role exists
    :raises HTTPBadRequest: registry reported invalid input
    """
    found_id = ""
    try:
        for candidate in registry.get_roles_detail(req.context):
            same_cluster = candidate['cluster_id'] == config_meta['cluster']
            same_name = candidate['name'] == config_meta['role']
            if same_cluster and same_name:
                found_id = candidate['id']
                break
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return found_id
def check_cluster_role_name_repetition(self, req, role_name, cluster_id):
    """Reject a role name already used (case-insensitively) inside the
    given cluster.

    :raises HTTPForbidden: the cluster already has a role by that name
    """
    wanted = role_name.lower()
    for role in registry.get_roles_detail(req.context):
        if role['cluster_id'] == cluster_id and \
                role['name'].lower() == wanted:
            msg = _("The role %s has already been in the cluster %s!"
                    % (role_name, cluster_id))
            LOG.debug(msg)
            raise HTTPForbidden(msg)
def _raise_404_if_role_exist(self, req, config_meta):
    """Resolve the id of the role named in config_meta inside its
    cluster.

    :returns: the first matching role id, or "" when none matches
    :raises HTTPBadRequest: registry reported invalid input
    """
    try:
        roles = registry.get_roles_detail(req.context)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    matches = (r['id'] for r in roles
               if r['cluster_id'] == config_meta['cluster'] and
               r['name'] == config_meta['role'])
    # next() with default mirrors the original first-match-or-"" loop.
    return next(matches, "")
def _raise_404_if_role_exist(self, req, config_set_meta):
    """Resolve the ids of the requested role names within the cluster.

    :param req: the WSGI Request object
    :param config_set_meta: mapping with 'cluster' and 'role' (the
        latter a serialized list of role names)
    :returns: list of ids of the roles that exist in the cluster
    :raises HTTPBadRequest: registry reported invalid input
    """
    import ast  # local import: safe parsing of the serialized list

    role_id_list = []
    try:
        roles = registry.get_roles_detail(req.context)
        # literal_eval parses the request-supplied list without the
        # arbitrary-code-execution risk of eval(); also hoisted out of
        # the loop so it is parsed once instead of per role.
        role_names = ast.literal_eval(config_set_meta['role'])
        for role in roles:
            for role_name in role_names:
                if role['cluster_id'] == config_set_meta['cluster'] \
                        and role['name'] == role_name:
                    role_id_list.append(role['id'])
                    break
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return role_id_list
def enable_cinder_backend(req, cluster_id, config_data):
    """Configure cinder storage backends for a cluster.

    Collects the service disks attached to the CONTROLLER_LB role and
    enables LVM and/or Ceph cinder configuration accordingly.

    :param req: the WSGI Request object
    :param cluster_id: cluster whose roles are inspected
    :param config_data: config mapping passed through to the helpers
    """
    service_disks = []
    params = {'filters': {'cluster_id': cluster_id}}
    roles = registry.get_roles_detail(req.context, **params)
    for role in roles:
        if role['name'] == 'CONTROLLER_LB':
            service_disks += _get_services_disk(req, role)
    for disk in service_disks:
        if disk.get('service', None) == 'cinder' and \
                disk.get('protocol_type', None) == 'LVM':
            config_lvm_for_cinder(config_data)
        # Ceph needs a concrete partition; use 'is not None' rather
        # than '!= None' for the identity test (PEP 8).
        elif disk.get('service', None) == 'ceph' and \
                disk.get('protocol_type', None) == 'RAW' and \
                disk.get('partition') is not None and \
                disk.get('partition') != '':
            config_ceph_for_cinder(config_data, disk)
def enable_neutron_backend(req, cluster_id, kolla_config):
    """Enable the OpenDaylight neutron backend in kolla's globals.yml
    for every 'opendaylight' backend bound to a CONTROLLER_LB role of
    the cluster.

    :param req: the WSGI Request object
    :param cluster_id: cluster whose roles/backends are inspected
    :param kolla_config: kolla config mapping (unused by this path)
    """
    params = {'cluster_id': cluster_id}
    roles = registry.get_roles_detail(req.context, **params)
    all_neutron_backends = registry.list_neutron_backend_metadata(
        req.context, **params)
    # Base settings applied whenever ODL is enabled; copied per match
    # so the L2-only tweak never leaks between backends.
    odl_defaults = {
        'enable_opendaylight': "yes",
        'neutron_plugin_agent': "opendaylight",
        'opendaylight_mechanism_driver': "opendaylight_v2",
        'opendaylight_l3_service_plugin': "odl-router_v2",
        'enable_opendaylight_l3': "yes",
        'enable_opendaylight_qos': "no",
        'enable_opendaylight_legacy_netvirt_conntrack': "no",
        'opendaylight_features':
            "odl-dlux-core,odl-dluxapps-applications,"
            "odl-mdsal-apidocs,odl-netvirt-openstack",
        'opendaylight_restconf_port': "8088",
        'opendaylight_restconf_port_backup': "8182",
        'opendaylight_haproxy_restconf_port': "8087",
        'opendaylight_haproxy_restconf_port_backup': "8181",
    }
    for role in roles:
        for neutron_backend in all_neutron_backends:
            is_odl_on_lb = (
                role['name'] == 'CONTROLLER_LB' and
                neutron_backend['neutron_backends_type'] ==
                'opendaylight' and
                neutron_backend['role_id'] == role['id'])
            if not is_odl_on_lb:
                continue
            opendaylight_config = dict(odl_defaults)
            if neutron_backend['enable_l2_or_l3'] == 'l2':
                opendaylight_config['enable_opendaylight_l3'] = 'no'
            update_kolla_globals_yml(opendaylight_config)
def cluster_config_set_progress(self, req, config_set_meta):
    """Report per-role config-set push progress for a cluster.

    :param req: the WSGI Request object
    :param config_set_meta: mapping that must contain 'cluster' and may
        contain 'role' (a collection of role names) to restrict the
        report to specific roles
    :returns: list of dicts, each with 'role-name' and
        'config_set_update_progress'
    :raises HTTPNotFound: 'cluster' missing, or a named role not found
    :raises HTTPBadRequest: registry reported invalid input
    """
    role_list = []
    if 'cluster' in config_set_meta:
        orig_cluster = config_set_meta['cluster']
        self._raise_404_if_cluster_deleted(req, orig_cluster)
        try:
            if config_set_meta.get('role', None):
                role_id_list = self._raise_404_if_role_exist(
                    req, config_set_meta)
                # All requested role names must have resolved to ids;
                # otherwise at least one of them does not exist.
                if len(role_id_list) == len(config_set_meta['role']):
                    for role_id in role_id_list:
                        role_info = {}
                        role_meta = registry.get_role_metadata(
                            req.context, role_id)
                        role_info['role-name'] = role_meta['name']
                        role_info['config_set_update_progress'] = \
                            role_meta[
                                'config_set_update_progress']
                        role_list.append(role_info)
                else:
                    msg = "the role is not exist"
                    LOG.error(msg)
                    raise HTTPNotFound(msg)
            else:
                # No role restriction: report on every role of the
                # cluster.
                roles = registry.get_roles_detail(req.context)
                for role in roles:
                    if role['cluster_id'] == config_set_meta['cluster']:
                        role_info = {}
                        role_info['role-name'] = role['name']
                        role_info['config_set_update_progress'] = role[
                            'config_set_update_progress']
                        role_list.append(role_info)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
        return role_list
    else:
        msg = "the cluster is not exist"
        LOG.error(msg)
        raise HTTPNotFound(msg)
def template_to_host(self, req, host_template):
    """Instantiate a saved host template onto an existing host.

    Looks up the named template in the cluster's template list, binds
    it to the cluster, maps template role names to role ids, strips
    identity fields, re-matches ether interfaces by PCI address, and
    finally writes the merged metadata back to the host.

    :param req: the WSGI Request object
    :param host_template: mapping with 'cluster_name',
        'host_template_name' and 'host_id'
    :returns: {'host_template': <updated host metadata>}
    :raises HTTPNotFound: missing cluster name or unknown template
    :raises HTTPBadRequest: missing host id or interface mismatch
    """
    if not host_template.get('cluster_name', None):
        msg = "cluster name is null"
        raise HTTPNotFound(explanation=msg)
    params = {'filters': {'cluster_name': host_template['cluster_name']}}
    templates = registry.host_template_lists_metadata(req.context,
                                                      **params)
    hosts_param = []
    host_template_used = {}
    if templates and templates[0]:
        hosts_param = json.loads(templates[0]['hosts'])
        for host in hosts_param:
            if host['name'] == host_template['host_template_name']:
                host_template_used = host
                break
    if not host_template_used:
        msg = "not host_template %s" % host_template['host_template_name']
        raise HTTPNotFound(explanation=msg, request=req,
                           content_type="text/plain")
    if host_template.get('host_id', None):
        self.get_host_meta_or_404(req, host_template['host_id'])
    else:
        msg = "host_id is not null"
        raise HTTPBadRequest(explanation=msg)
    host_id = host_template['host_id']
    params = {'filters': {'name': host_template['cluster_name']}}
    clusters = registry.get_clusters_detail(req.context, **params)
    if clusters and clusters[0]:
        host_template_used['cluster'] = clusters[0]['id']
    # was has_key(...): dict.has_key() was removed in Python 3.
    if 'role' in host_template_used and host_template_used['role']:
        role_id_list = []
        host_role_list = []
        if 'cluster' in host_template_used:
            params = self._get_query_params(req)
            role_list = registry.get_roles_detail(req.context, **params)
            for role_name in role_list:
                if role_name['cluster_id'] == \
                        host_template_used['cluster']:
                    host_role_list = list(host_template_used['role'])
                    if role_name['name'] in host_role_list:
                        role_id_list.append(role_name['id'])
            host_template_used['role'] = role_id_list
    # Drop identity fields that must not be copied from a template;
    # pop(key, None) is the has_key-then-pop pattern in one call.
    for ignore_key in ('name', 'dmi_uuid', 'ipmi_user', 'ipmi_passwd',
                       'ipmi_addr'):
        host_template_used.pop(ignore_key, None)
    host_template_interfaces = host_template_used.get('interfaces', None)
    if host_template_interfaces:
        template_ether_interface = [
            interface for interface in host_template_interfaces
            if interface['type'] == "ether"
        ]
        orig_host_meta = registry.get_host_metadata(req.context, host_id)
        orig_host_interfaces = orig_host_meta.get('interfaces', None)
        temp_orig_host_interfaces = [
            interface for interface in orig_host_interfaces
            if interface['type'] == "ether"
        ]
        if len(temp_orig_host_interfaces) != len(template_ether_interface):
            msg = (_('host_id %s does not match the host_id host_template '
                     '%s.') % (host_id,
                               host_template['host_template_name']))
            raise HTTPBadRequest(explanation=msg)
        interface_match_flag = 0
        for host_template_interface in host_template_interfaces:
            if host_template_interface['type'] == 'ether':
                # Match template NICs to real NICs by PCI address and
                # take over the real MAC; template IPs are discarded.
                for orig_host_interface in orig_host_interfaces:
                    if orig_host_interface['pci'] == \
                            host_template_interface['pci']:
                        interface_match_flag += 1
                        host_template_interface['mac'] = \
                            orig_host_interface['mac']
                        host_template_interface.pop('ip', None)
        if interface_match_flag != len(template_ether_interface):
            msg = (_('host_id %s does not match the host '
                     'host_template %s.') %
                   (host_id, host_template['host_template_name']))
            raise HTTPBadRequest(explanation=msg)
        host_template_used['interfaces'] = str(host_template_interfaces)
    host_template = registry.update_host_metadata(req.context, host_id,
                                                  host_template_used)
    return {"host_template": host_template}
def get_roles_detail(req):
    """Fetch every role known to the registry.

    :param req: the WSGI Request object
    :raises HTTPBadRequest: registry reported invalid input
    """
    try:
        return registry.get_roles_detail(req.context)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
def template_to_host(self, req, host_template):
    """Instantiate a saved host template onto an existing host.

    Runs any extension hooks, binds the template to its cluster, maps
    role names to ids, strips identity/SSH-only fields, re-matches the
    template's ether/bond interfaces against the host's real ones (by
    name) and writes the merged metadata back to the host.

    :param req: the WSGI Request object
    :param host_template: mapping with 'cluster_name',
        'host_template_name' and 'host_id'
    :returns: {'host_template': <updated host metadata>}
    :raises HTTPNotFound: missing cluster name or unknown template
    :raises HTTPBadRequest: missing host id, template/host mismatch, or
        registry update failure
    """
    if not host_template.get('cluster_name', None):
        msg = "cluster name is null"
        raise HTTPNotFound(explanation=msg)
    if host_template.get('host_id', None):
        host_id = host_template['host_id']
        orig_host_meta = self.get_host_meta_or_404(req, host_id)
    else:
        msg = "host id which need to template instantiate can't be null"
        raise HTTPBadRequest(explanation=msg)
    # Give every installed extension a chance to pre-process the host.
    path = os.path.join(
        os.path.abspath(os.path.dirname(os.path.realpath(__file__))),
        'ext')
    for root, dirs, names in os.walk(path):
        filename = 'router.py'
        if filename in names:
            ext_name = root.split(path)[1].strip('/')
            ext_func = "%s.api.hosts" % ext_name
            extension = importutils.import_module('daisy.api.v1.ext.%s'
                                                  % ext_func)
            if 'template_to_host' in dir(extension):
                extension.template_to_host(req, orig_host_meta)
    params = {'filters': {'cluster_name': host_template['cluster_name']}}
    templates = registry.host_template_lists_metadata(
        req.context, **params)
    hosts_param = []
    host_template_used = {}
    if templates and templates[0]:
        hosts_param = json.loads(templates[0]['hosts'])
        for host in hosts_param:
            if host['name'] == host_template['host_template_name']:
                host_template_used = host
                break
    if not host_template_used:
        msg = "not host_template %s" % host_template['host_template_name']
        raise HTTPNotFound(explanation=msg, request=req,
                           content_type="text/plain")
    params = {'filters': {'name': host_template['cluster_name']}}
    clusters = registry.get_clusters_detail(req.context, **params)
    if clusters and clusters[0]:
        host_template_used['cluster'] = clusters[0]['id']
    if 'role' in host_template_used and host_template_used['role']:
        role_id_list = []
        host_role_list = []
        if 'cluster' in host_template_used:
            params = self._get_query_params(req)
            role_list = registry.get_roles_detail(req.context, **params)
            for role_name in role_list:
                if role_name['cluster_id'] == host_template_used[
                        'cluster']:
                    host_role_list = list(host_template_used['role'])
                    if role_name['name'] in host_role_list:
                        role_id_list.append(role_name['id'])
            host_template_used['role'] = role_id_list
    # Identity fields must never be copied from a template.
    ignore_common_key_list = ["name", "dmi_uuid", "ipmi_addr"]
    for ignore_key in ignore_common_key_list:
        if host_template_used.get(ignore_key, None):
            host_template_used.pop(ignore_key)
    ssh_host_flag = self._judge_ssh_host(req,
                                         host_template_used['cluster'],
                                         host_id)
    # Fields that only make sense for PXE-discovered hosts.
    ignore_ssh_key_list = [
        "root_disk", "root_lv_size", "swap_lv_size", "isolcpus",
        "os_version_file", "os_version_id", "root_pwd", "hugepages",
        "hugepagesize", "discover_mode"
    ]
    if ssh_host_flag:
        for ignore_key in ignore_ssh_key_list:
            if host_template_used.get(ignore_key, None):
                host_template_used.pop(ignore_key)
        daisy_cmn.add_ssh_host_to_cluster_and_assigned_network(
            req, host_template_used['cluster'], host_id)
        # ssh host add cluster and assigned network,must get new host
        # data.
        orig_host_meta = registry.get_host_metadata(req.context, host_id)
    else:
        if not host_template_used.get("root_disk", None):
            raise HTTPBadRequest(
                explanation="ssh host template can't be used by pxe host")
    host_template_used['os_status'] = "init"
    host_template_used['messages'] = ""
    host_template_used['os_progress'] = 0
    host_template_used['description'] = ""
    host_template_interfaces = host_template_used.get('interfaces', None)
    if host_template_interfaces:
        template_ether_interface = [
            interface for interface in host_template_interfaces
            if interface['type'] == "ether"
        ]
        template_bond_interface = [
            interface for interface in host_template_interfaces
            if interface['type'] == "bond"
        ]
        orig_host_interfaces = orig_host_meta.get('interfaces', None)
        temp_orig_host_interfaces = [
            interface for interface in orig_host_interfaces
            if interface['type'] == "ether"
        ]
        if len(temp_orig_host_interfaces) != \
                len(template_ether_interface):
            msg = (_('host_id %s number of interface '
                     'does not match host template'
                     '%s.') % (host_id,
                               host_template['host_template_name']))
            raise HTTPBadRequest(explanation=msg)
        interface_match_flag = 0
        # FIX: was filter(lambda ...) — on Python 3 that returns a
        # one-shot iterator with no remove()/extend(), which breaks the
        # mutation below and stores an exhausted iterator at the end.
        # A list comprehension behaves identically on Python 2 and 3.
        host_template_interfaces = [
            interface for interface in host_template_interfaces
            if 'vlan' != interface['type']
        ]
        for host_template_interface in host_template_interfaces:
            if host_template_interface['type'] == 'ether':
                for orig_host_interface in orig_host_interfaces:
                    if orig_host_interface['name'] == \
                            host_template_interface['name']:
                        interface_match_flag += 1
                        host_template_interface['mac'] = \
                            orig_host_interface['mac']
                        if 'ip' in host_template_interface and \
                                ssh_host_flag:
                            host_template_interface['ip'] = \
                                orig_host_interface['ip']
                        else:
                            host_template_interface.pop('ip')
                        if orig_host_interface.get('assigned_networks',
                                                   None) and \
                                ssh_host_flag:
                            host_template_interface['assigned_networks'] \
                                = orig_host_interface['assigned_networks']
            if host_template_interface['type'] == 'bond':
                for orig_host_interface in orig_host_interfaces:
                    if orig_host_interface['name'] == \
                            host_template_interface['name']:
                        if ssh_host_flag:
                            interface_match_flag += 1
                        interface_list = ["mac", "slave1", "slave2", "ip"]
                        for interface_key in interface_list:
                            if host_template_interface.get(
                                    interface_key, None) and \
                                    ssh_host_flag:
                                host_template_interface[interface_key] \
                                    = orig_host_interface[interface_key]
                        if 'ip' in host_template_interface and \
                                not ssh_host_flag:
                            host_template_interface.pop('ip')
                        if orig_host_interface.get('assigned_networks',
                                                   None) and \
                                ssh_host_flag:
                            host_template_interface['assigned_networks'] \
                                = orig_host_interface['assigned_networks']
            if host_template_interface['type'] == 'vlan':
                host_template_interfaces.remove(host_template_interface)
        if ssh_host_flag:
            # SSH hosts keep their real vlan interfaces instead of the
            # template's.
            vlan_interfaces = []
            for orig_host_interface in orig_host_interfaces:
                if orig_host_interface['type'] == 'vlan':
                    vlan_interfaces.append(orig_host_interface)
            host_template_interfaces.extend(vlan_interfaces)
            if interface_match_flag != (len(template_ether_interface) +
                                        len(template_bond_interface)):
                msg = (_('ssh discover host_id '
                         'interface %s does not match the '
                         'host_template %s.') %
                       (host_id, host_template['host_template_name']))
                raise HTTPBadRequest(explanation=msg)
        else:
            if interface_match_flag != len(template_ether_interface):
                msg = (_('host_id %s interface does not match the '
                         'host_template %s.') %
                       (host_id, host_template['host_template_name']))
                raise HTTPBadRequest(explanation=msg)
        host_template_used['interfaces'] = host_template_interfaces
    try:
        host_template = registry.update_host_metadata(
            req.context, host_id, host_template_used)
    except Exception as e:
        # FIX: e.message does not exist on Python 3 exceptions; str(e)
        # yields the same text on both versions.
        raise HTTPBadRequest(str(e))
    return {"host_template": host_template}
def import_template_to_db(self, req, template):
    """
    create cluster

    Create a cluster (plus its networks, roles, volumes, backends and
    service disks) from a stored cluster template.

    :param req: the WSGI Request object
    :param template: mapping with 'template_name' and 'cluster' (the
        name for the new cluster)
    :returns: {'template': <new cluster metadata>}
    :raises HTTPForbidden: template missing or cluster already exists
    :raises HTTPBadRequest: unsupported backend type or invalid input
    """
    cluster_id = ""
    template_cluster = {}
    cluster_meta = {}
    template_meta = copy.deepcopy(template)
    template_name = template_meta.get('template_name', None)
    cluster_name = template_meta.get('cluster', None)
    template_params = {'filters': {'name': template_name}}
    template_list = registry.template_lists_metadata(
        req.context, **template_params)
    if template_list:
        template_cluster = template_list[0]
    else:
        msg = "the template %s is not exist" % template_name
        LOG.error(msg)
        raise HTTPForbidden(explanation=msg, request=req,
                            content_type="text/plain")
    try:
        template_content = json.loads(template_cluster['content'])
        template_content_cluster = template_content['cluster']
        template_content_cluster['name'] = cluster_name
        # Carry the template's host list over into a host-template
        # record for the new cluster (update if one already exists).
        if template_cluster['hosts']:
            template_hosts = json.loads(template_cluster['hosts'])
            template_host_params = {'cluster_name': cluster_name}
            template_host_list = registry.host_template_lists_metadata(
                req.context, **template_host_params)
            if template_host_list:
                update_template_meta = {
                    "cluster_name": cluster_name,
                    "hosts": json.dumps(template_hosts)
                }
                registry.update_host_template_metadata(
                    req.context, template_host_list[0]['id'],
                    update_template_meta)
            else:
                template_meta = {
                    "cluster_name": cluster_name,
                    "hosts": json.dumps(template_hosts)
                }
                registry.add_host_template_metadata(
                    req.context, template_meta)
        cluster_params = {'filters': {'name': cluster_name}}
        clusters = registry.get_clusters_detail(req.context,
                                                **cluster_params)
        if clusters:
            msg = "the cluster %s is exist" % clusters[0]['name']
            LOG.error(msg)
            raise HTTPForbidden(explanation=msg, request=req,
                                content_type="text/plain")
        else:
            # Only one cluster may have auto_scale enabled; drop the
            # flag if any existing cluster already holds it.
            if template_content_cluster.get('auto_scale', None) == 1:
                params = {'filters': ''}
                clusters_list = registry.get_clusters_detail(
                    req.context, **params)
                for cluster in clusters_list:
                    if cluster.get('auto_scale', None) == 1:
                        template_content_cluster['auto_scale'] = 0
                        break
            if template_cluster.get('type') in utils.SUPPORT_BACKENDS:
                template_content['cluster'].setdefault(
                    'target_systems',
                    'os+%s' % template_cluster['type'])
            else:
                msg = 'type in template: "%s" not support' % \
                    template_cluster
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
            cluster_meta = registry.add_cluster_metadata(
                req.context, template_content['cluster'])
        cluster_id = cluster_meta['id']
        params = {'filters': {}}
        networks = registry.get_networks_detail(req.context, cluster_id,
                                                **params)
        # Merge template networks into the (auto-created) cluster
        # networks: update by name, otherwise add as new.
        template_content_networks = template_content['networks']
        for template_content_network in template_content_networks:
            network_exist = 'false'
            for network in networks:
                if template_content_network['name'] == network['name']:
                    update_network_meta = registry.update_network_metadata(
                        req.context, network['id'],
                        template_content_network)
                    network_exist = 'true'
            if network_exist == 'false':
                template_content_network['cluster_id'] = cluster_id
                add_network_meta = registry.add_network_metadata(
                    req.context, template_content_network)
        params = {'filters': {'cluster_id': cluster_id}}
        roles = registry.get_roles_detail(req.context, **params)
        # Same merge strategy for roles; config_set_id is regenerated
        # for the new cluster, so the template's value is dropped.
        template_content_roles = template_content['roles']
        for template_content_role in template_content_roles:
            role_exist = 'false'
            del template_content_role['config_set_id']
            for role in roles:
                if template_content_role['name'] == role['name']:
                    update_role_meta = registry.update_role_metadata(
                        req.context, role['id'], template_content_role)
                    role_exist = 'true'
            if role_exist == 'false':
                template_content_role['cluster_id'] = cluster_id
                registry.add_role_metadata(req.context,
                                           template_content_role)
        self._import_cinder_volumes_to_db(
            req, template_content['cinder_volumes'], roles)
        if 'neutron_backends' in template_content:
            self._import_neutron_backends_to_db(
                req, template_content['neutron_backends'], roles)
        if 'optical_switchs' in template_content:
            self._import_optical_switchs_to_db(
                req, template_content['optical_switchs'], roles)
        self._import_services_disk_to_db(
            req, template_content['services_disk'], roles)
        # add extension content for cluster_template
        path = os.path.join(
            os.path.abspath(os.path.dirname(os.path.realpath(__file__))),
            'ext')
        for root, dirs, names in os.walk(path):
            filename = 'router.py'
            if filename in names:
                ext_name = root.split(path)[1].strip('/')
                ext_func = "%s.api.hosts" % ext_name
                extension = importutils.import_module(
                    'daisy.api.v1.ext.%s' % ext_func)
                if 'import_template_to_db_ext' in dir(extension):
                    extension.import_template_to_db_ext(req, cluster_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return {"template": cluster_meta}
def import_template_to_db(self, req, template):
    """
    create cluster

    Build a cluster — networks, roles, cinder volumes, neutron
    backends, optical switches and service disks included — from a
    stored cluster template.

    :param req: the WSGI Request object
    :param template: mapping with 'template_name' and 'cluster' (the
        name for the new cluster)
    :returns: {'template': <new cluster metadata>}
    :raises HTTPForbidden: template missing or cluster already exists
    :raises HTTPBadRequest: unsupported backend type or invalid input
    """
    cluster_id = ""
    template_cluster = {}
    cluster_meta = {}
    template_meta = copy.deepcopy(template)
    template_name = template_meta.get('template_name', None)
    cluster_name = template_meta.get('cluster', None)
    template_params = {'filters': {'name': template_name}}
    template_list = registry.template_lists_metadata(
        req.context, **template_params)
    if template_list:
        template_cluster = template_list[0]
    else:
        msg = "the template %s is not exist" % template_name
        LOG.error(msg)
        raise HTTPForbidden(
            explanation=msg, request=req, content_type="text/plain")
    try:
        template_content = json.loads(template_cluster['content'])
        template_content_cluster = template_content['cluster']
        template_content_cluster['name'] = cluster_name
        # Copy the template's host list into a host-template record
        # for the new cluster (update in place when one exists).
        if template_cluster['hosts']:
            template_hosts = json.loads(template_cluster['hosts'])
            template_host_params = {'cluster_name': cluster_name}
            template_host_list = registry.host_template_lists_metadata(
                req.context, **template_host_params)
            if template_host_list:
                update_template_meta = {
                    "cluster_name": cluster_name,
                    "hosts": json.dumps(template_hosts)}
                registry.update_host_template_metadata(
                    req.context, template_host_list[0]['id'],
                    update_template_meta)
            else:
                template_meta = {
                    "cluster_name": cluster_name,
                    "hosts": json.dumps(template_hosts)}
                registry.add_host_template_metadata(
                    req.context, template_meta)
        cluster_params = {'filters': {'name': cluster_name}}
        clusters = registry.get_clusters_detail(
            req.context, **cluster_params)
        if clusters:
            msg = "the cluster %s is exist" % clusters[0]['name']
            LOG.error(msg)
            raise HTTPForbidden(
                explanation=msg, request=req, content_type="text/plain")
        else:
            # auto_scale is exclusive: clear it when another cluster
            # already claims it.
            if template_content_cluster.get('auto_scale', None) == 1:
                params = {'filters': ''}
                clusters_list = registry.get_clusters_detail(
                    req.context, **params)
                for cluster in clusters_list:
                    if cluster.get('auto_scale', None) == 1:
                        template_content_cluster['auto_scale'] = 0
                        break
            if template_cluster.get('type') in utils.SUPPORT_BACKENDS:
                template_content['cluster'].setdefault(
                    'target_systems',
                    'os+%s' % template_cluster['type'])
            else:
                msg = 'type in template: "%s" not support' % \
                    template_cluster
                LOG.error(msg)
                raise HTTPBadRequest(explanation=msg, request=req)
            cluster_meta = registry.add_cluster_metadata(
                req.context, template_content['cluster'])
        cluster_id = cluster_meta['id']
        params = {'filters': {}}
        networks = registry.get_networks_detail(
            req.context, cluster_id, **params)
        # Merge template networks into the cluster: update by name,
        # otherwise add as new.
        template_content_networks = template_content['networks']
        for template_content_network in template_content_networks:
            network_exist = 'false'
            for network in networks:
                if template_content_network['name'] == network['name']:
                    update_network_meta = registry.update_network_metadata(
                        req.context, network['id'],
                        template_content_network)
                    network_exist = 'true'
            if network_exist == 'false':
                template_content_network['cluster_id'] = cluster_id
                add_network_meta = registry.add_network_metadata(
                    req.context, template_content_network)
        params = {'filters': {'cluster_id': cluster_id}}
        roles = registry.get_roles_detail(req.context, **params)
        # Same merge for roles; the template's config_set_id belongs
        # to the old cluster and is discarded.
        template_content_roles = template_content['roles']
        for template_content_role in template_content_roles:
            role_exist = 'false'
            del template_content_role['config_set_id']
            for role in roles:
                if template_content_role['name'] == role['name']:
                    update_role_meta = registry.update_role_metadata(
                        req.context, role['id'], template_content_role)
                    role_exist = 'true'
            if role_exist == 'false':
                template_content_role['cluster_id'] = cluster_id
                registry.add_role_metadata(
                    req.context, template_content_role)
        self._import_cinder_volumes_to_db(
            req, template_content['cinder_volumes'], roles)
        if 'neutron_backends' in template_content:
            self._import_neutron_backends_to_db(
                req, template_content['neutron_backends'], roles)
        if 'optical_switchs' in template_content:
            self._import_optical_switchs_to_db(
                req, template_content['optical_switchs'], roles)
        self._import_services_disk_to_db(
            req, template_content['services_disk'], roles)
        # add extension content for cluster_template
        path = os.path.join(os.path.abspath(os.path.dirname(
            os.path.realpath(__file__))), 'ext')
        for root, dirs, names in os.walk(path):
            filename = 'router.py'
            if filename in names:
                ext_name = root.split(path)[1].strip('/')
                ext_func = "%s.api.hosts" % ext_name
                extension = importutils.import_module(
                    'daisy.api.v1.ext.%s' % ext_func)
                if 'import_template_to_db_ext' in dir(extension):
                    extension.import_template_to_db_ext(req, cluster_id)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return {"template": cluster_meta}
def export_db_to_json(self, req, template):
    """Export a cluster's configuration from the DB into a template.

    Collects the cluster with its roles, networks, cinder volumes,
    neutron backends, optical switchs and services disks, strips
    DB-internal fields, stores the result as a template record and
    also dumps it to ``<daisy_path><template_name>.json`` on disk.

    :param req: The WSGI/Webob Request object
    :param template: dict with 'cluster_name', 'template_name' and
                     optional 'type' and 'description'
    :raises HTTPForbidden: if the named cluster does not exist
    :raises HTTPBadRequest: if the registry reports invalid data
    :returns: {'template': <exported template detail>}
    """
    cluster_name = template.get('cluster_name', None)
    # renamed from 'type' to avoid shadowing the builtin
    template_type = template.get('type', None)
    description = template.get('description', None)
    template_name = template.get('template_name', None)
    self._enforce(req, 'export_db_to_json')
    cinder_volume_list = []
    neutron_backend_list = []
    service_disk_list = []
    optical_switch_list = []
    template_content = {}
    template_json = {}
    template_id = ""
    # Initialized up front: the original raised NameError at the final
    # return when an unsupported type skipped the block below.
    template_detail = {}
    if not template_type or template_type == "tecs":
        try:
            params = {'filters': {'name': cluster_name}}
            clusters = registry.get_clusters_detail(req.context, **params)
            if clusters:
                cluster_id = clusters[0]['id']
            else:
                msg = "the cluster %s is not exist" % cluster_name
                LOG.error(msg)
                raise HTTPForbidden(
                    explanation=msg,
                    request=req,
                    content_type="text/plain")
            params = {'filters': {'cluster_id': cluster_id}}
            cluster = registry.get_cluster_metadata(
                req.context, cluster_id)
            roles = registry.get_roles_detail(req.context, **params)
            networks = registry.get_networks_detail(
                req.context, cluster_id, **params)
            for role in roles:
                cinder_volume_list += self._get_cinder_volumes(req, role)
                service_disk_list += self._get_services_disk(req, role)
                optical_switch_list += self._get_optical_switchs(req, role)
                neutron_backend_list += self._get_neutron_backends(
                    req, role)
                if role.get('config_set_id', None):
                    config_set = registry.get_config_set_metadata(
                        req.context, role['config_set_id'])
                    if config_set.get("config", None):
                        role['config_set'] = config_set['config']
                # Drop DB/runtime-only fields; pop() tolerates fields
                # that are already absent (del raised uncaught KeyError).
                role.pop('cluster_id', None)
                role.pop('status', None)
                role.pop('progress', None)
                role.pop('messages', None)
                role.pop('config_set_update_progress', None)
                self._del_general_params(role)
            for network in networks:
                network_detail = registry.get_network_metadata(
                    req.context, network['id'])
                if network_detail.get('ip_ranges', None):
                    network['ip_ranges'] = network_detail['ip_ranges']
                network.pop('cluster_id', None)
                self._del_general_params(network)
            if cluster.get('routers', None):
                for router in cluster['routers']:
                    router.pop('cluster_id', None)
                    self._del_general_params(router)
            if cluster.get('logic_networks', None):
                for logic_network in cluster['logic_networks']:
                    for subnet in logic_network['subnets']:
                        subnet.pop('logic_network_id', None)
                        subnet.pop('router_id', None)
                        self._del_general_params(subnet)
                    logic_network.pop('cluster_id', None)
                    self._del_general_params(logic_network)
            if cluster.get('nodes', None):
                del cluster['nodes']
            self._del_general_params(cluster)
            self._del_cluster_params(cluster)
            # The version is environment specific and must not be
            # carried over into the exported template.
            cluster['tecs_version_id'] = ""
            template_content['cluster'] = cluster
            template_content['cluster_name'] = cluster_name
            template_content['roles'] = roles
            template_content['networks'] = networks
            template_content['cinder_volumes'] = cinder_volume_list
            template_content['neutron_backends'] = neutron_backend_list
            template_content['optical_switchs'] = optical_switch_list
            template_content['services_disk'] = service_disk_list
            template_json['content'] = json.dumps(template_content)
            template_json['type'] = 'tecs'
            template_json['name'] = template_name
            template_json['description'] = description
            template_host_params = {'cluster_name': cluster_name}
            template_hosts = registry.host_template_lists_metadata(
                req.context, **template_host_params)
            if template_hosts:
                template_json['hosts'] = template_hosts[0]['hosts']
            else:
                template_json['hosts'] = "[]"
            # Update the template record if one with this name exists,
            # otherwise create it.
            template_params = {'filters': {'name': template_name}}
            template_list = registry.template_lists_metadata(
                req.context, **template_params)
            if template_list:
                registry.update_template_metadata(
                    req.context, template_list[0]['id'], template_json)
                template_id = template_list[0]['id']
            else:
                add_template = registry.add_template_metadata(
                    req.context, template_json)
                template_id = add_template['id']
            if template_id:
                template_detail = registry.template_detail_metadata(
                    req.context, template_id)
                self._del_general_params(template_detail)
                template_detail['content'] = json.loads(
                    template_detail['content'])
                if template_detail['hosts']:
                    template_detail['hosts'] = json.loads(
                        template_detail['hosts'])
                # Also dump the template to a json file on disk.
                tecs_json = daisy_path + "%s.json" % template_name
                cmd = 'rm -rf %s' % (tecs_json,)
                daisy_cmn.subprocess_call(cmd)
                with open(tecs_json, "w+") as fp:
                    json.dump(template_detail, fp, indent=2)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
    return {"template": template_detail}
def get_host_disk_db_glance_nova_size(self, req, host_id, cluster_id):
    '''
    Compute the usable (non-OS) disk size of a host together with the
    db/glance/nova logical-volume sizes required by the host's roles
    in the given cluster.

    :param req: The WSGI/Webob Request object
    :param host_id: id of the host to inspect
    :param cluster_id: id of the cluster the host belongs (or will
                       belong) to
    :raises HTTPNotFound: if the host is deleted or belongs to a
                          different cluster
    :returns: dict of the form::

        host_disk_db_glance_nova_size['disk_size'] = 1024000
        host_disk_db_glance_nova_size['db_lv_size'] = 1011
        host_disk_db_glance_nova_size['glance_lv_size'] = 1011
        host_disk_db_glance_nova_size['nova_lv_size'] = 1011
    '''
    host_disk_db_glance_nova_size = dict()
    db_lv_size = list()
    glance_lv_size = list()
    nova_lv_size = list()
    host_info = self.get_host_meta_or_404(req, host_id)
    if host_info:
        if 'deleted' in host_info and host_info['deleted']:
            msg = _("Node with identifier %s has been deleted.") % \
                host_info['id']
            LOG.debug(msg)
            raise HTTPNotFound(msg)
        # get host disk infomation
        host_disk = self._get_host_disk_except_os_disk_by_info(host_info)
        host_disk_db_glance_nova_size['disk_size'] = host_disk
        # get role_host db/glance/nova infomation
        cluster_info = self.get_cluster_meta_or_404(req, cluster_id)
        if 'cluster' in host_info:
            # host with cluster
            if host_info['cluster'] != cluster_info['name']:
                # type(host_info['cluster']) = list,
                # type(cluster_info['name']) = str
                msg = _("Role and hosts belong to different cluster.")
                LOG.debug(msg)
                raise HTTPNotFound(msg)
            else:
                all_roles = registry.get_roles_detail(req.context)
                cluster_roles = [
                    role for role in all_roles
                    if role['cluster_id'] == cluster_id
                ]
                # roles infomation saved in cluster_roles
                if 'role' in host_info and host_info[
                        'role']:  # host with role
                    for role in cluster_roles:
                        if role['name'] in host_info[
                                'role'] and cluster_roles:
                            db_lv_size.append(role.get('db_lv_size', None))
                            glance_lv_size.append(
                                role.get('glance_lv_size', None))
                            nova_lv_size.append(
                                role.get('nova_lv_size', None))
        # NOTE(review): a host without a cluster (or without roles)
        # collects no sizes above, so every lv size falls back to 0.
        if db_lv_size:
            host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size)
        else:
            # host without cluster
            host_disk_db_glance_nova_size['db_lv_size'] = 0
        if glance_lv_size:
            host_disk_db_glance_nova_size['glance_lv_size'] = max(
                glance_lv_size)
        else:
            host_disk_db_glance_nova_size['glance_lv_size'] = 0
        if nova_lv_size:
            host_disk_db_glance_nova_size['nova_lv_size'] = max(
                nova_lv_size)
        else:
            host_disk_db_glance_nova_size['nova_lv_size'] = 0
    LOG.warning('--------host(%s)disk_db_glance_nova_size:----- %s'
                % (host_id, host_disk_db_glance_nova_size))
    return host_disk_db_glance_nova_size
def import_template_to_db(self, req, template):
    """Create a cluster with its networks, roles and cinder volumes
    from a saved template.

    :param req: The WSGI/Webob Request object
    :param template: dict with 'name' (the template name) and
                     'cluster' (the new cluster's name)
    :raises HTTPForbidden: if the template does not exist or a cluster
                           with the requested name already exists
    :raises HTTPBadRequest: if the registry reports invalid data
    :returns: {'template': <new cluster metadata>}
    """
    cluster_id = ""
    template_cluster = {}
    cluster_meta = {}
    template_meta = copy.deepcopy(template)
    template_name = template_meta.get('name', None)
    cluster_name = template_meta.get('cluster', None)
    template_params = {'filters': {'name': template_name}}
    template_list = registry.template_lists_metadata(
        req.context, **template_params)
    if template_list:
        template_cluster = template_list[0]
    else:
        msg = "the template %s is not exist" % template_name
        LOG.error(msg)
        raise HTTPForbidden(
            explanation=msg,
            request=req,
            content_type="text/plain")
    try:
        template_content = json.loads(template_cluster['content'])
        template_content_cluster = template_content['cluster']
        template_content_cluster['name'] = cluster_name
        # These nested structures are stored as strings; quoted 'true'
        # values inside logic_networks must become Python's True.
        template_content_cluster['networking_parameters'] = str(
            template_content_cluster['networking_parameters'])
        template_content_cluster['logic_networks'] = str(
            template_content_cluster['logic_networks'])
        template_content_cluster['logic_networks'] = \
            template_content_cluster['logic_networks'].replace(
                "\'true\'", "True")
        template_content_cluster['routers'] = str(
            template_content_cluster['routers'])
        if template_cluster['hosts']:
            # Keep the host-template record in sync with the new cluster.
            template_hosts = json.loads(template_cluster['hosts'])
            template_host_params = {'cluster_name': cluster_name}
            template_host_list = registry.host_template_lists_metadata(
                req.context, **template_host_params)
            if template_host_list:
                update_template_meta = {
                    "cluster_name": cluster_name,
                    "hosts": json.dumps(template_hosts)}
                registry.update_host_template_metadata(
                    req.context, template_host_list[0]['id'],
                    update_template_meta)
            else:
                template_meta = {
                    "cluster_name": cluster_name,
                    "hosts": json.dumps(template_hosts)}
                registry.add_host_template_metadata(
                    req.context, template_meta)
        cluster_params = {'filters': {'name': cluster_name}}
        clusters = registry.get_clusters_detail(
            req.context, **cluster_params)
        if clusters:
            msg = "the cluster %s is exist" % clusters[0]['name']
            LOG.error(msg)
            raise HTTPForbidden(
                explanation=msg,
                request=req,
                content_type="text/plain")
        else:
            cluster_meta = registry.add_cluster_metadata(
                req.context, template_content['cluster'])
        cluster_id = cluster_meta['id']
        params = {'filters': {}}
        networks = registry.get_networks_detail(
            req.context, cluster_id, **params)
        template_content_networks = template_content['networks']
        for template_content_network in template_content_networks:
            template_content_network['ip_ranges'] = str(
                template_content_network['ip_ranges'])
            # Update the cluster network matching by name, otherwise
            # create it.
            network_exist = False
            for network in networks:
                if template_content_network['name'] == network['name']:
                    registry.update_network_metadata(
                        req.context, network['id'],
                        template_content_network)
                    network_exist = True
            if not network_exist:
                template_content_network['cluster_id'] = cluster_id
                registry.add_network_metadata(
                    req.context, template_content_network)
        params = {'filters': {'cluster_id': cluster_id}}
        roles = registry.get_roles_detail(req.context, **params)
        template_content_roles = template_content['roles']
        for template_content_role in template_content_roles:
            role_exist = False
            # The template's config_set_id belongs to the source
            # cluster and must not leak into the new one.
            template_content_role.pop('config_set_id', None)
            for role in roles:
                if template_content_role['name'] == role['name']:
                    registry.update_role_metadata(
                        req.context, role['id'], template_content_role)
                    role_exist = True
            if not role_exist:
                template_content_role['cluster_id'] = cluster_id
                registry.add_role_metadata(
                    req.context, template_content_role)
        cinder_volumes = registry.list_cinder_volume_metadata(
            req.context, **params)
        template_content_cinder_volumes = \
            template_content['cinder_volumes']
        # Roles do not change below, so fetch them once instead of
        # re-querying the registry for every cinder volume.
        roles = registry.get_roles_detail(req.context, **params)
        for template_content_cinder_volume in \
                template_content_cinder_volumes:
            cinder_volume_exist = False
            # Templates reference roles by name; translate that to the
            # id of the freshly created role.
            for role in roles:
                if template_content_cinder_volume['role_id'] == \
                        role['name']:
                    template_content_cinder_volume['role_id'] = \
                        role['id']
            for cinder_volume in cinder_volumes:
                if template_content_cinder_volume['role_id'] == \
                        cinder_volume['role_id']:
                    registry.update_cinder_volume_metadata(
                        req.context, cinder_volume['id'],
                        template_content_cinder_volume)
                    cinder_volume_exist = True
            if not cinder_volume_exist:
                registry.add_cinder_volume_metadata(
                    req.context, template_content_cinder_volume)
    except exception.Invalid as e:
        raise HTTPBadRequest(explanation=e.msg, request=req)
    return {"template": cluster_meta}
def export_db_to_json(self, req, template):
    """Export a cluster's configuration from the DB into a template.

    Collects the cluster with its roles, networks, cinder volumes,
    neutron backends, optical switchs and services disks, strips
    DB-internal fields, stores the result as a template record and
    also dumps it to ``<daisy_path><template_name>.json`` on disk.

    :param req: The WSGI/Webob Request object
    :param template: dict with 'cluster_name', 'template_name' and
                     optional 'type' and 'description'
    :raises HTTPForbidden: if the named cluster does not exist
    :raises HTTPBadRequest: if the registry reports invalid data
    :returns: {'template': <exported template detail>}
    """
    cluster_name = template.get('cluster_name', None)
    # renamed from 'type' to avoid shadowing the builtin
    template_type = template.get('type', None)
    description = template.get('description', None)
    template_name = template.get('template_name', None)
    self._enforce(req, 'export_db_to_json')
    cinder_volume_list = []
    neutron_backend_list = []
    service_disk_list = []
    optical_switch_list = []
    template_content = {}
    template_json = {}
    template_id = ""
    # Initialized up front: the original raised NameError at the final
    # return when an unsupported type skipped the block below.
    template_detail = {}
    if not template_type or template_type == "tecs":
        try:
            params = {'filters': {'name': cluster_name}}
            clusters = registry.get_clusters_detail(req.context, **params)
            if clusters:
                cluster_id = clusters[0]['id']
            else:
                msg = "the cluster %s is not exist" % cluster_name
                LOG.error(msg)
                raise HTTPForbidden(
                    explanation=msg,
                    request=req,
                    content_type="text/plain")
            params = {'filters': {'cluster_id': cluster_id}}
            cluster = registry.get_cluster_metadata(
                req.context, cluster_id)
            roles = registry.get_roles_detail(req.context, **params)
            networks = registry.get_networks_detail(
                req.context, cluster_id, **params)
            for role in roles:
                cinder_volume_list += self._get_cinder_volumes(req, role)
                service_disk_list += self._get_services_disk(req, role)
                optical_switch_list += self._get_optical_switchs(req, role)
                neutron_backend_list += self._get_neutron_backends(
                    req, role)
                if role.get('config_set_id', None):
                    config_set = registry.get_config_set_metadata(
                        req.context, role['config_set_id'])
                    if config_set.get("config", None):
                        role['config_set'] = config_set['config']
                # Drop DB/runtime-only fields; pop() tolerates fields
                # that are already absent (del raised uncaught KeyError).
                role.pop('cluster_id', None)
                role.pop('status', None)
                role.pop('progress', None)
                role.pop('messages', None)
                role.pop('config_set_update_progress', None)
                self._del_general_params(role)
            for network in networks:
                network_detail = registry.get_network_metadata(
                    req.context, network['id'])
                if network_detail.get('ip_ranges', None):
                    network['ip_ranges'] = network_detail['ip_ranges']
                network.pop('cluster_id', None)
                self._del_general_params(network)
            if cluster.get('routers', None):
                for router in cluster['routers']:
                    router.pop('cluster_id', None)
                    self._del_general_params(router)
            if cluster.get('logic_networks', None):
                for logic_network in cluster['logic_networks']:
                    for subnet in logic_network['subnets']:
                        subnet.pop('logic_network_id', None)
                        subnet.pop('router_id', None)
                        self._del_general_params(subnet)
                    logic_network.pop('cluster_id', None)
                    self._del_general_params(logic_network)
            if cluster.get('nodes', None):
                del cluster['nodes']
            self._del_general_params(cluster)
            self._del_cluster_params(cluster)
            # The version is environment specific and must not be
            # carried over into the exported template.
            cluster['tecs_version_id'] = ""
            template_content['cluster'] = cluster
            template_content['cluster_name'] = cluster_name
            template_content['roles'] = roles
            template_content['networks'] = networks
            template_content['cinder_volumes'] = cinder_volume_list
            template_content['neutron_backends'] = neutron_backend_list
            template_content['optical_switchs'] = optical_switch_list
            template_content['services_disk'] = service_disk_list
            template_json['content'] = json.dumps(template_content)
            template_json['type'] = 'tecs'
            template_json['name'] = template_name
            template_json['description'] = description
            template_host_params = {'cluster_name': cluster_name}
            template_hosts = registry.host_template_lists_metadata(
                req.context, **template_host_params)
            if template_hosts:
                template_json['hosts'] = template_hosts[0]['hosts']
            else:
                template_json['hosts'] = "[]"
            # Update the template record if one with this name exists,
            # otherwise create it.
            template_params = {'filters': {'name': template_name}}
            template_list = registry.template_lists_metadata(
                req.context, **template_params)
            if template_list:
                registry.update_template_metadata(
                    req.context, template_list[0]['id'], template_json)
                template_id = template_list[0]['id']
            else:
                add_template = registry.add_template_metadata(
                    req.context, template_json)
                template_id = add_template['id']
            if template_id:
                template_detail = registry.template_detail_metadata(
                    req.context, template_id)
                self._del_general_params(template_detail)
                template_detail['content'] = json.loads(
                    template_detail['content'])
                if template_detail['hosts']:
                    template_detail['hosts'] = json.loads(
                        template_detail['hosts'])
                # Also dump the template to a json file on disk.
                tecs_json = daisy_path + "%s.json" % template_name
                cmd = 'rm -rf %s' % (tecs_json, )
                daisy_cmn.subprocess_call(cmd)
                with open(tecs_json, "w+") as fp:
                    json.dump(template_detail, fp, indent=2)
        except exception.Invalid as e:
            raise HTTPBadRequest(explanation=e.msg, request=req)
    return {"template": template_detail}