def _run_instance(self, cluster, node_group, idx, aa_group=None,
                  old_aa_groups=None):
    """Boot a nova server for *node_group* and persist it in the DB.

    :param cluster: cluster the instance belongs to.
    :param node_group: node group describing image/flavor/processes.
    :param idx: 1-based index used to build the instance name.
    :param aa_group: nova server group id for anti-affinity (new scheme).
    :param old_aa_groups: mapping of node process -> instance ids
                          (legacy anti-affinity scheme); mutated in place.
    :returns: id of the instance record added via the conductor.
    """
    ctx = context.ctx()
    name = g.generate_instance_name(cluster.name, node_group.name, idx)
    userdata = self._generate_user_data_script(node_group, name)

    if old_aa_groups:
        # Legacy anti-affinity: gather ids of servers that already run
        # aa-enabled processes so new servers land on different hosts.
        occupied = set()
        for process in node_group.node_processes:
            occupied.update(old_aa_groups.get(process) or [])
        hints = {'different_host': sorted(occupied)} if occupied else None
    else:
        # Server-group based anti-affinity (current scheme).
        wants_group = aa_group and self._need_aa_server_group(node_group)
        hints = {'group': aa_group} if wants_group else None

    security_groups = self._map_security_groups(node_group.security_groups)
    nova_kwargs = {
        'scheduler_hints': hints,
        'userdata': userdata,
        'key_name': cluster.user_keypair_id,
        'security_groups': security_groups,
    }
    if CONF.use_neutron:
        # Attach the server to the cluster's management network.
        nova_kwargs['nics'] = [
            {"net-id": cluster.neutron_management_network,
             "v4-fixed-ip": ""}]

    nova_instance = nova.client().servers.create(
        name, node_group.get_image_id(), node_group.flavor_id,
        **nova_kwargs)

    instance_id = conductor.instance_add(
        ctx, node_group,
        {"instance_id": nova_instance.id, "instance_name": name})

    if old_aa_groups:
        # Register the new server under each aa-enabled process it hosts
        # so later boots can schedule away from it.
        for process in node_group.node_processes:
            if process in cluster.anti_affinity:
                ids = old_aa_groups.get(process, [])
                ids.append(nova_instance.id)
                old_aa_groups[process] = ids

    return instance_id
def check_cluster_hostnames_lengths(cluster_name, node_groups):
    """Fail if any composite hostname would exceed MAX_HOSTNAME_LENGTH.

    The longest hostname in a node group belongs to its highest-indexed
    instance, so checking index ``ng["count"]`` covers the whole group.

    :raises ex.InvalidDataException: when a hostname is too long.
    """
    for group in node_groups:
        candidate = "%s.%s" % (
            g.generate_instance_name(
                cluster_name, group["name"], group["count"]),
            CONF.node_domain)
        if len(candidate) > MAX_HOSTNAME_LENGTH:
            raise ex.InvalidDataException(
                _("Composite hostname %(host)s in provisioned cluster exceeds"
                  " maximum limit %(limit)s characters")
                % {"host": candidate, "limit": MAX_HOSTNAME_LENGTH})
def check_cluster_hostnames_lengths(cluster_name, node_groups):
    """Validate that every generated FQDN fits MAX_HOSTNAME_LENGTH.

    Only the highest-indexed instance of each node group is checked,
    since it produces the longest hostname for that group.

    :raises ex.InvalidException: when a composite hostname is too long.
    """
    for group in node_groups:
        base = g.generate_instance_name(
            cluster_name, group['name'], group['count'])
        fqdn = base + '.' + CONF.node_domain
        if len(fqdn) > MAX_HOSTNAME_LENGTH:
            raise ex.InvalidException(
                "Composite hostname %s in provisioned cluster exceeds "
                "maximum limit %s characters" % (fqdn, MAX_HOSTNAME_LENGTH))
def check_cluster_hostnames_lengths(cluster_name, node_groups):
    """Ensure composite hostnames stay within MAX_HOSTNAME_LENGTH.

    :raises ex.InvalidDataException: when any hostname exceeds the limit.
    """
    for ng in node_groups:
        # The instance with index ng['count'] carries the longest name.
        fqdn = "{0}.{1}".format(
            g.generate_instance_name(cluster_name, ng['name'], ng['count']),
            CONF.node_domain)
        if len(fqdn) <= MAX_HOSTNAME_LENGTH:
            continue
        raise ex.InvalidDataException(
            _("Composite hostname %(host)s in provisioned cluster exceeds"
              " maximum limit %(limit)s characters")
            % {'host': fqdn, 'limit': MAX_HOSTNAME_LENGTH})
def _run_instance(self, cluster, node_group, idx, aa_group=None,
                  old_aa_groups=None):
    """Create instance using nova client and persist them into DB.

    :param cluster: cluster the instance belongs to.
    :param node_group: node group describing image/flavor/processes.
    :param idx: 1-based index used to build the instance name.
    :param aa_group: nova server group id for anti-affinity (new scheme).
    :param old_aa_groups: mapping of node process -> instance ids
                          (legacy anti-affinity scheme); mutated in place.
    :returns: id of the instance record added via the conductor.
    """
    ctx = context.ctx()
    name = g.generate_instance_name(cluster.name, node_group.name, idx)

    userdata = self._generate_user_data_script(node_group, name)

    if old_aa_groups:
        # aa_groups: node process -> instance ids
        aa_ids = []
        for node_process in node_group.node_processes:
            aa_ids += old_aa_groups.get(node_process) or []

        # create instances only at hosts w/ no instances
        # w/ aa-enabled processes
        hints = {'different_host': sorted(set(aa_ids))} if aa_ids else None
    else:
        hints = {'group': aa_group} if (
            aa_group and self._need_aa_server_group(node_group)) else None

    # Build the create() arguments once instead of duplicating the call
    # in each branch; the neutron case only adds the 'nics' kwarg.
    nova_kwargs = {
        'scheduler_hints': hints,
        'userdata': userdata,
        'key_name': cluster.user_keypair_id,
        'security_groups': node_group.security_groups,
    }
    if CONF.use_neutron:
        net_id = cluster.neutron_management_network
        nova_kwargs['nics'] = [{"net-id": net_id, "v4-fixed-ip": ""}]

    nova_instance = nova.client().servers.create(
        name, node_group.get_image_id(), node_group.flavor_id,
        **nova_kwargs)

    instance_id = conductor.instance_add(ctx, node_group,
                                         {"instance_id": nova_instance.id,
                                          "instance_name": name})

    if old_aa_groups:
        # save instance id to aa_groups to support aa feature
        for node_process in node_group.node_processes:
            if node_process in cluster.anti_affinity:
                aa_group_ids = old_aa_groups.get(node_process, [])
                aa_group_ids.append(nova_instance.id)
                old_aa_groups[node_process] = aa_group_ids

    return instance_id
def _start_instance(self, cluster, node_group, idx, aa_group,
                    old_aa_groups=None):
    """Run one instance inside an InstanceInfo context; return its id."""
    instance_name = g.generate_instance_name(
        cluster.name, node_group.name, idx)
    info = context.InstanceInfo(
        cluster.id, None, instance_name, node_group.id)
    # Scope progress/event reporting to this particular instance.
    with context.InstanceInfoManager(info):
        return self._run_instance(
            cluster, node_group, idx,
            aa_group=aa_group, old_aa_groups=old_aa_groups)
def _get_inst_name(cluster_name, ng_name, index):
    """Return the generated instance name for a 0-based *index*."""
    # Generated instance names are 1-based, so shift the index up by one.
    ordinal = index + 1
    return g.generate_instance_name(cluster_name, ng_name, ordinal)
def test_generate_instance_name(self):
    """Generated names are lower-case regardless of input casing."""
    expected = "cluster-worker-001"
    for cluster_name, ng_name in (("cluster", "worker"),
                                  ("CLUSTER", "WORKER")):
        self.assertEqual(
            expected,
            general.generate_instance_name(cluster_name, ng_name, 1))