def _run_instance(cluster, node_group, idx, aa_groups, userdata):
    """Boot one nova server for *node_group* and persist it in the DB.

    The server name follows the '<cluster>-<node_group>-NNN' pattern.
    *aa_groups* maps an anti-affinity-enabled node process to the nova ids
    already running it; it is both read (to build scheduler hints) and
    updated (with the new server's id). Returns the persisted Instance.
    """
    session = context.ctx().session
    name = '%s-%s-%03d' % (cluster.name, node_group.name, idx)

    # Collect ids of servers whose anti-affinity processes overlap with
    # this node group's processes.
    conflicting_ids = []
    for process in node_group.node_processes:
        conflicting_ids.extend(aa_groups.get(process) or [])

    # Hint the scheduler away from hosts already running those servers.
    if conflicting_ids:
        hints = {'different_host': list(set(conflicting_ids))}
    else:
        hints = None

    context.model_save(node_group)

    nova_instance = nova.client().servers.create(
        name, node_group.get_image_id(), node_group.flavor_id,
        scheduler_hints=hints, userdata=userdata,
        key_name=cluster.user_keypair_id)

    with session.begin():
        instance = m.Instance(node_group.id, nova_instance.id, name)
        node_group.instances.append(instance)
        session.add(instance)

    # Register the new server under each of its anti-affinity processes so
    # subsequent instances are scheduled onto different hosts.
    for process in node_group.node_processes:
        if process in cluster.anti_affinity:
            aa_groups.setdefault(process, []).append(nova_instance.id)

    return instance
def scale_cluster(cluster_id, data):
    """Validate a scaling request and spawn the provisioning of new nodes.

    *data* may carry "resize_node_groups" (existing groups to resize) and
    "add_node_groups" (brand-new groups). Returns the cluster object.
    """
    cluster = get_cluster(id=cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    # to_be_enlarged maps node_group_name -> desired_amount_of_instances;
    # it is the main object the rest of the scaling flow works with.
    to_be_enlarged = {}
    for ng in data.get("resize_node_groups", []):
        to_be_enlarged[ng["name"]] = ng["count"]

    additional = construct_ngs_for_scaling(data.get("add_node_groups", []))

    try:
        context.model_update(cluster, status="Validating")
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception:
        # Validation failed: restore the cluster status, then re-raise.
        with excutils.save_and_reraise_exception():
            context.model_update(cluster, status="Active")

    # Validation succeeded, so persist the new node groups and fold them
    # into the enlargement map before provisioning starts.
    for new_ng in additional:
        cluster.node_groups.append(new_ng)
        to_be_enlarged[new_ng.name] = additional[new_ng]
    context.model_save(cluster)

    context.spawn(_provision_nodes, cluster_id, to_be_enlarged)

    return cluster
def _scale_cluster_instances(cluster, node_groups_map, plugin):
    """Shrink or grow node groups to the counts in *node_groups_map*.

    *node_groups_map* maps a node_group object to its target instance
    count. Surplus instances are decommissioned via the plugin and shut
    down; missing ones are booted. Returns the list of newly created
    instances (possibly empty).
    """
    aa_groups = _generate_anti_affinity_groups(cluster)

    surplus_instances = []
    growing_groups = []
    for node_group, target in node_groups_map.items():
        if target < node_group.count:
            # Everything beyond the target count gets removed.
            surplus_instances.extend(
                node_group.instances[target:node_group.count])
        else:
            growing_groups.append(node_group)

    if surplus_instances:
        cluster.status = 'Decommissioning'
        plugin.decommission_nodes(cluster, surplus_instances)
        cluster.status = 'Deleting Instances'
        for instance in surplus_instances:
            group = instance.node_group
            group.instances.remove(instance)
            _shutdown_instance(instance)
            group.count -= 1
            context.model_save(group)

    new_instances = []
    if growing_groups:
        cluster.status = 'Adding Instances'
        for node_group in growing_groups:
            target = node_groups_map[node_group]
            userdata = _generate_user_data_script(node_group)
            # Instance indices continue from the current count, 1-based.
            for idx in xrange(node_group.count + 1, target + 1):
                new_instances.append(
                    _run_instance(cluster, node_group, idx, aa_groups,
                                  userdata))
            node_group.count = target

    return new_instances
def _insert_test_object():
    """Create, persist and return a TestModel row with test_field == 123."""
    obj = TestModel()
    obj.test_field = 123
    context.model_save(obj)
    return obj